/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "debugger.h"

#include <sys/uio.h>

#include <set>

#include "arch/context.h"
#include "class_linker.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
#include "dex_instruction.h"
#include "field_helper.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "handle_scope.h"
#include "jdwp/object_registry.h"
#include "method_helper.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "mirror/throwable.h"
#include "quick/inline_method_analyser.h"
#include "reflection.h"
#include "safe_map.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "ScopedPrimitiveArray.h"
#include "handle_scope-inl.h"
#include "thread_list.h"
#include "throw_location.h"
#include "utf.h"
#include "verifier/method_verifier-inl.h"
#include "well_known_classes.h"

#ifdef HAVE_ANDROID_OS
#include "cutils/properties.h"
#endif

namespace art {

static const size_t kMaxAllocRecordStackDepth = 16;  // Max 255.
static const size_t kDefaultNumAllocRecords = 64*1024;  // Must be a power of 2. 2BE can hold 64k-1.

// Limit alloc_record_count to the 2BE value that is the limit of the current protocol.
static uint16_t CappedAllocRecordCount(size_t alloc_record_count) {
  if (alloc_record_count > 0xffff) {
    return 0xffff;
  }
  return alloc_record_count;
}
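// For example, with kDefaultNumAllocRecords (64*1024) records live, the count reported to the
// debugger is capped at 0xffff (65535), since the reply carries it in a two-byte (2BE) field.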

class AllocRecordStackTraceElement {
 public:
  AllocRecordStackTraceElement() : method_(nullptr), dex_pc_(0) {
  }

  int32_t LineNumber() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    mirror::ArtMethod* method = Method();
    DCHECK(method != nullptr);
    return method->GetLineNumFromDexPC(DexPc());
  }

  mirror::ArtMethod* Method() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    return soa.DecodeMethod(method_);
  }

  void SetMethod(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    method_ = soa.EncodeMethod(m);
  }

  uint32_t DexPc() const {
    return dex_pc_;
  }

  void SetDexPc(uint32_t pc) {
    dex_pc_ = pc;
  }

 private:
  jmethodID method_;
  uint32_t dex_pc_;
};

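// Dbg::TypeCache keeps one weak global reference per distinct class, keyed by identity hash
// code, so allocation records can refer to a class without keeping it alive.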
jobject Dbg::TypeCache::Add(mirror::Class* t) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  int32_t hash_code = t->IdentityHashCode();
  auto range = objects_.equal_range(hash_code);
  for (auto it = range.first; it != range.second; ++it) {
    if (soa.Decode<mirror::Class*>(it->second) == t) {
      // Found a matching weak global, return it.
      return it->second;
    }
  }
  JNIEnv* env = soa.Env();
  const jobject local_ref = soa.AddLocalReference<jobject>(t);
  const jobject weak_global = env->NewWeakGlobalRef(local_ref);
  env->DeleteLocalRef(local_ref);
  objects_.insert(std::make_pair(hash_code, weak_global));
  return weak_global;
}

void Dbg::TypeCache::Clear() {
  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
  Thread* self = Thread::Current();
  for (const auto& p : objects_) {
    vm->DeleteWeakGlobalRef(self, p.second);
  }
  objects_.clear();
}

class AllocRecord {
 public:
  AllocRecord() : type_(nullptr), byte_count_(0), thin_lock_id_(0) {}

  mirror::Class* Type() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return down_cast<mirror::Class*>(Thread::Current()->DecodeJObject(type_));
  }

  void SetType(mirror::Class* t) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
                                                       Locks::alloc_tracker_lock_) {
    type_ = Dbg::type_cache_.Add(t);
  }

  size_t GetDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    size_t depth = 0;
    while (depth < kMaxAllocRecordStackDepth && stack_[depth].Method() != nullptr) {
      ++depth;
    }
    return depth;
  }

  size_t ByteCount() const {
    return byte_count_;
  }

  void SetByteCount(size_t count) {
    byte_count_ = count;
  }

  uint16_t ThinLockId() const {
    return thin_lock_id_;
  }

  void SetThinLockId(uint16_t id) {
    thin_lock_id_ = id;
  }

  AllocRecordStackTraceElement* StackElement(size_t index) {
    DCHECK_LT(index, kMaxAllocRecordStackDepth);
    return &stack_[index];
  }

 private:
  jobject type_;  // This is a weak global.
  size_t byte_count_;
  uint16_t thin_lock_id_;
  AllocRecordStackTraceElement stack_[kMaxAllocRecordStackDepth];  // Unused entries have nullptr method.
};

class Breakpoint {
 public:
  Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc, bool need_full_deoptimization)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : method_(nullptr), dex_pc_(dex_pc), need_full_deoptimization_(need_full_deoptimization) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    method_ = soa.EncodeMethod(method);
  }

  Breakpoint(const Breakpoint& other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : method_(nullptr), dex_pc_(other.dex_pc_),
        need_full_deoptimization_(other.need_full_deoptimization_) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    method_ = soa.EncodeMethod(other.Method());
  }

  mirror::ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    return soa.DecodeMethod(method_);
  }

  uint32_t DexPc() const {
    return dex_pc_;
  }

  bool NeedFullDeoptimization() const {
    return need_full_deoptimization_;
  }

 private:
  // The location of this breakpoint.
  jmethodID method_;
  uint32_t dex_pc_;

  // Indicates whether breakpoint needs full deoptimization or selective deoptimization.
  bool need_full_deoptimization_;
};

static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.Method()).c_str(), rhs.DexPc());
  return os;
}

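// Forwards instrumentation events (method entry/exit/unwind, dex pc moves, field accesses and
// caught exceptions) from the runtime's instrumentation framework to the debugger.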
class DebugInstrumentationListener FINAL : public instrumentation::InstrumentationListener {
 public:
  DebugInstrumentationListener() {}
  virtual ~DebugInstrumentationListener() {}

  void MethodEntered(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                     uint32_t dex_pc)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (method->IsNative()) {
      // TODO: posting location events is a suspension point and native method entry stubs aren't.
      return;
    }
    Dbg::UpdateDebugger(thread, this_object, method, 0, Dbg::kMethodEntry, nullptr);
  }

  void MethodExited(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                    uint32_t dex_pc, const JValue& return_value)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (method->IsNative()) {
      // TODO: posting location events is a suspension point and native method entry stubs aren't.
      return;
    }
    Dbg::UpdateDebugger(thread, this_object, method, dex_pc, Dbg::kMethodExit, &return_value);
  }

  void MethodUnwind(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                    uint32_t dex_pc)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // We're not registered to listen to this kind of event, so complain.
    LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method)
               << " " << dex_pc;
  }

  void DexPcMoved(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                  uint32_t new_dex_pc)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Dbg::UpdateDebugger(thread, this_object, method, new_dex_pc, 0, nullptr);
  }

  void FieldRead(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                 uint32_t dex_pc, mirror::ArtField* field)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field);
  }

  void FieldWritten(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                    uint32_t dex_pc, mirror::ArtField* field, const JValue& field_value)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value);
  }

  void ExceptionCaught(Thread* thread, const ThrowLocation& throw_location,
                       mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
                       mirror::Throwable* exception_object)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Dbg::PostException(throw_location, catch_method, catch_dex_pc, exception_object);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(DebugInstrumentationListener);
} gDebugInstrumentationListener;
289
Elliott Hughes4ffd3132011-10-24 12:06:42 -0700290// JDWP is allowed unless the Zygote forbids it.
291static bool gJdwpAllowed = true;
292
Elliott Hughesc0f09332012-03-26 13:27:06 -0700293// Was there a -Xrunjdwp or -agentlib:jdwp= argument on the command line?
Elliott Hughes3bb81562011-10-21 18:52:59 -0700294static bool gJdwpConfigured = false;
295
Elliott Hughesc0f09332012-03-26 13:27:06 -0700296// Broken-down JDWP options. (Only valid if IsJdwpConfigured() is true.)
Elliott Hughes376a7a02011-10-24 18:35:55 -0700297static JDWP::JdwpOptions gJdwpOptions;
Elliott Hughes3bb81562011-10-21 18:52:59 -0700298
299// Runtime JDWP state.
Ian Rogersc0542af2014-09-03 16:16:56 -0700300static JDWP::JdwpState* gJdwpState = nullptr;
Elliott Hughes3bb81562011-10-21 18:52:59 -0700301static bool gDebuggerConnected; // debugger or DDMS is connected.
302static bool gDebuggerActive; // debugger is making requests.
Elliott Hughes86964332012-02-15 19:37:42 -0800303static bool gDisposed; // debugger called VirtualMachine.Dispose, so we should drop the connection.
Elliott Hughes3bb81562011-10-21 18:52:59 -0700304
Elliott Hughes47fce012011-10-25 18:37:19 -0700305static bool gDdmThreadNotification = false;
306
Elliott Hughes767a1472011-10-26 18:49:02 -0700307// DDMS GC-related settings.
308static Dbg::HpifWhen gDdmHpifWhen = Dbg::HPIF_WHEN_NEVER;
309static Dbg::HpsgWhen gDdmHpsgWhen = Dbg::HPSG_WHEN_NEVER;
310static Dbg::HpsgWhat gDdmHpsgWhat;
311static Dbg::HpsgWhen gDdmNhsgWhen = Dbg::HPSG_WHEN_NEVER;
312static Dbg::HpsgWhat gDdmNhsgWhat;
313
Ian Rogers719d1a32014-03-06 12:13:39 -0800314static ObjectRegistry* gRegistry = nullptr;
Elliott Hughes475fc232011-10-25 15:00:35 -0700315
Elliott Hughes545a0642011-11-08 19:10:03 -0800316// Recent allocation tracking.
Ian Rogers719d1a32014-03-06 12:13:39 -0800317AllocRecord* Dbg::recent_allocation_records_ = nullptr; // TODO: CircularBuffer<AllocRecord>
318size_t Dbg::alloc_record_max_ = 0;
319size_t Dbg::alloc_record_head_ = 0;
320size_t Dbg::alloc_record_count_ = 0;
Mathieu Chartier4345c462014-06-27 10:20:14 -0700321Dbg::TypeCache Dbg::type_cache_;
Elliott Hughes545a0642011-11-08 19:10:03 -0800322
Sebastien Hertz138dbfc2013-12-04 18:15:25 +0100323// Deoptimization support.
Sebastien Hertz4d25df32014-03-21 17:44:46 +0100324std::vector<DeoptimizationRequest> Dbg::deoptimization_requests_;
325size_t Dbg::full_deoptimization_event_count_ = 0;
Sebastien Hertz7ec2f1c2014-03-27 20:06:47 +0100326size_t Dbg::delayed_full_undeoptimization_count_ = 0;
Sebastien Hertz138dbfc2013-12-04 18:15:25 +0100327
Sebastien Hertz42cd43f2014-05-13 14:15:41 +0200328// Instrumentation event reference counters.
329size_t Dbg::dex_pc_change_event_ref_count_ = 0;
330size_t Dbg::method_enter_event_ref_count_ = 0;
331size_t Dbg::method_exit_event_ref_count_ = 0;
332size_t Dbg::field_read_event_ref_count_ = 0;
333size_t Dbg::field_write_event_ref_count_ = 0;
334size_t Dbg::exception_catch_event_ref_count_ = 0;
335uint32_t Dbg::instrumentation_events_ = 0;
336
Sebastien Hertz138dbfc2013-12-04 18:15:25 +0100337// Breakpoints.
jeffhao09bfc6a2012-12-11 18:11:43 -0800338static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);
Elliott Hughes86964332012-02-15 19:37:42 -0800339
void DebugInvokeReq::VisitRoots(RootCallback* callback, void* arg, uint32_t tid,
                                RootType root_type) {
  if (receiver != nullptr) {
    callback(&receiver, arg, tid, root_type);
  }
  if (thread != nullptr) {
    callback(&thread, arg, tid, root_type);
  }
  if (klass != nullptr) {
    callback(reinterpret_cast<mirror::Object**>(&klass), arg, tid, root_type);
  }
  if (method != nullptr) {
    callback(reinterpret_cast<mirror::Object**>(&method), arg, tid, root_type);
  }
}

void DebugInvokeReq::Clear() {
  invoke_needed = false;
  receiver = nullptr;
  thread = nullptr;
  klass = nullptr;
  method = nullptr;
}

void SingleStepControl::VisitRoots(RootCallback* callback, void* arg, uint32_t tid,
                                   RootType root_type) {
  if (method != nullptr) {
    callback(reinterpret_cast<mirror::Object**>(&method), arg, tid, root_type);
  }
}

bool SingleStepControl::ContainsDexPc(uint32_t dex_pc) const {
  return dex_pcs.find(dex_pc) == dex_pcs.end();
}

void SingleStepControl::Clear() {
  is_active = false;
  method = nullptr;
  dex_pcs.clear();
}

static bool IsBreakpoint(const mirror::ArtMethod* m, uint32_t dex_pc)
    LOCKS_EXCLUDED(Locks::breakpoint_lock_)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
  for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
    if (gBreakpoints[i].DexPc() == dex_pc && gBreakpoints[i].Method() == m) {
      VLOG(jdwp) << "Hit breakpoint #" << i << ": " << gBreakpoints[i];
      return true;
    }
  }
  return false;
}

static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thread)
    LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) {
  MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
  // A thread may be suspended for GC; in this code, we really want to know whether
  // there's a debugger suspension active.
  return thread->IsSuspended() && thread->GetDebugSuspendCount() > 0;
}

static mirror::Array* DecodeNonNullArray(JDWP::RefTypeId id, JDWP::JdwpError* error)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(id, error);
  if (o == nullptr) {
    *error = JDWP::ERR_INVALID_OBJECT;
    return nullptr;
  }
  if (!o->IsArrayInstance()) {
    *error = JDWP::ERR_INVALID_ARRAY;
    return nullptr;
  }
  *error = JDWP::ERR_NONE;
  return o->AsArray();
}

static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError* error)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(id, error);
  if (o == nullptr) {
    *error = JDWP::ERR_INVALID_OBJECT;
    return nullptr;
  }
  if (!o->IsClass()) {
    *error = JDWP::ERR_INVALID_CLASS;
    return nullptr;
  }
  *error = JDWP::ERR_NONE;
  return o->AsClass();
}

static Thread* DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_id,
                            JDWP::JdwpError* error)
    EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
    LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* thread_peer = gRegistry->Get<mirror::Object*>(thread_id, error);
  if (thread_peer == nullptr) {
    // This isn't even an object.
    *error = JDWP::ERR_INVALID_OBJECT;
    return nullptr;
  }

  mirror::Class* java_lang_Thread = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
  if (!java_lang_Thread->IsAssignableFrom(thread_peer->GetClass())) {
    // This isn't a thread.
    *error = JDWP::ERR_INVALID_THREAD;
    return nullptr;
  }

  Thread* thread = Thread::FromManagedThread(soa, thread_peer);
  // If thread is null then this is a java.lang.Thread without a Thread*. It must be an unstarted
  // thread or a zombie.
  *error = (thread == nullptr) ? JDWP::ERR_THREAD_NOT_ALIVE : JDWP::ERR_NONE;
  return thread;
}

static JDWP::JdwpTag BasicTagFromDescriptor(const char* descriptor) {
  // JDWP deliberately uses the descriptor characters' ASCII values for its enum.
  // Note that by "basic" we mean that we don't get more specific than JT_OBJECT.
  return static_cast<JDWP::JdwpTag>(descriptor[0]);
}

static JDWP::JdwpTag BasicTagFromClass(mirror::Class* klass)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  std::string temp;
  const char* descriptor = klass->GetDescriptor(&temp);
  return BasicTagFromDescriptor(descriptor);
}

static JDWP::JdwpTag TagFromClass(const ScopedObjectAccessUnchecked& soa, mirror::Class* c)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  CHECK(c != nullptr);
  if (c->IsArrayClass()) {
    return JDWP::JT_ARRAY;
  }
  if (c->IsStringClass()) {
    return JDWP::JT_STRING;
  }
  if (c->IsClassClass()) {
    return JDWP::JT_CLASS_OBJECT;
  }
  {
    mirror::Class* thread_class = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
    if (thread_class->IsAssignableFrom(c)) {
      return JDWP::JT_THREAD;
    }
  }
  {
    mirror::Class* thread_group_class =
        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
    if (thread_group_class->IsAssignableFrom(c)) {
      return JDWP::JT_THREAD_GROUP;
    }
  }
  {
    mirror::Class* class_loader_class =
        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader);
    if (class_loader_class->IsAssignableFrom(c)) {
      return JDWP::JT_CLASS_LOADER;
    }
  }
  return JDWP::JT_OBJECT;
}

/*
 * Objects declared to hold Object might actually hold a more specific
 * type. The debugger may take a special interest in these (e.g. it
 * wants to display the contents of Strings), so we want to return an
 * appropriate tag.
 *
 * Null objects are tagged JT_OBJECT.
 */
static JDWP::JdwpTag TagFromObject(const ScopedObjectAccessUnchecked& soa, mirror::Object* o) {
  return (o == nullptr) ? JDWP::JT_OBJECT : TagFromClass(soa, o->GetClass());
}
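// For example, a java.lang.String instance is tagged JT_STRING, a java.lang.Thread instance
// JT_THREAD, and a plain java.lang.Object instance (or a null reference) JT_OBJECT.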

static bool IsPrimitiveTag(JDWP::JdwpTag tag) {
  switch (tag) {
    case JDWP::JT_BOOLEAN:
    case JDWP::JT_BYTE:
    case JDWP::JT_CHAR:
    case JDWP::JT_FLOAT:
    case JDWP::JT_DOUBLE:
    case JDWP::JT_INT:
    case JDWP::JT_LONG:
    case JDWP::JT_SHORT:
    case JDWP::JT_VOID:
      return true;
    default:
      return false;
  }
}

/*
 * Handle one of the JDWP name/value pairs.
 *
 * JDWP options are:
 *  help: if specified, show help message and bail
 *  transport: may be dt_socket or dt_shmem
 *  address: for dt_socket, "host:port", or just "port" when listening
 *  server: if "y", wait for debugger to attach; if "n", attach to debugger
 *  timeout: how long to wait for debugger to connect / listen
 *
 * Useful with server=n (these aren't supported yet):
 *  onthrow=<exception-name>: connect to debugger when exception thrown
 *  onuncaught=y|n: connect to debugger when uncaught exception thrown
 *  launch=<command-line>: launch the debugger itself
 *
 * The "transport" option is required, as is "address" if server=n.
 */
static bool ParseJdwpOption(const std::string& name, const std::string& value) {
  if (name == "transport") {
    if (value == "dt_socket") {
      gJdwpOptions.transport = JDWP::kJdwpTransportSocket;
    } else if (value == "dt_android_adb") {
      gJdwpOptions.transport = JDWP::kJdwpTransportAndroidAdb;
    } else {
      LOG(ERROR) << "JDWP transport not supported: " << value;
      return false;
    }
  } else if (name == "server") {
    if (value == "n") {
      gJdwpOptions.server = false;
    } else if (value == "y") {
      gJdwpOptions.server = true;
    } else {
      LOG(ERROR) << "JDWP option 'server' must be 'y' or 'n'";
      return false;
    }
  } else if (name == "suspend") {
    if (value == "n") {
      gJdwpOptions.suspend = false;
    } else if (value == "y") {
      gJdwpOptions.suspend = true;
    } else {
      LOG(ERROR) << "JDWP option 'suspend' must be 'y' or 'n'";
      return false;
    }
  } else if (name == "address") {
    /* this is either <port> or <host>:<port> */
    std::string port_string;
    gJdwpOptions.host.clear();
    std::string::size_type colon = value.find(':');
    if (colon != std::string::npos) {
      gJdwpOptions.host = value.substr(0, colon);
      port_string = value.substr(colon + 1);
    } else {
      port_string = value;
    }
    if (port_string.empty()) {
      LOG(ERROR) << "JDWP address missing port: " << value;
      return false;
    }
    char* end;
    uint64_t port = strtoul(port_string.c_str(), &end, 10);
    if (*end != '\0' || port > 0xffff) {
      LOG(ERROR) << "JDWP address has junk in port field: " << value;
      return false;
    }
    gJdwpOptions.port = port;
  } else if (name == "launch" || name == "onthrow" || name == "oncaught" || name == "timeout") {
    /* valid but unsupported */
    LOG(INFO) << "Ignoring JDWP option '" << name << "'='" << value << "'";
  } else {
    LOG(INFO) << "Ignoring unrecognized JDWP option '" << name << "'='" << value << "'";
  }

  return true;
}

/*
 * Parse the latter half of a -Xrunjdwp/-agentlib:jdwp= string, e.g.:
 * "transport=dt_socket,address=8000,server=y,suspend=n"
 */
bool Dbg::ParseJdwpOptions(const std::string& options) {
  VLOG(jdwp) << "ParseJdwpOptions: " << options;

  std::vector<std::string> pairs;
  Split(options, ',', pairs);

  for (size_t i = 0; i < pairs.size(); ++i) {
    std::string::size_type equals = pairs[i].find('=');
    if (equals == std::string::npos) {
      LOG(ERROR) << "Can't parse JDWP option '" << pairs[i] << "' in '" << options << "'";
      return false;
    }
    ParseJdwpOption(pairs[i].substr(0, equals), pairs[i].substr(equals + 1));
  }

  if (gJdwpOptions.transport == JDWP::kJdwpTransportUnknown) {
    LOG(ERROR) << "Must specify JDWP transport: " << options;
  }
  if (!gJdwpOptions.server && (gJdwpOptions.host.empty() || gJdwpOptions.port == 0)) {
    LOG(ERROR) << "Must specify JDWP host and port when server=n: " << options;
    return false;
  }

  gJdwpConfigured = true;
  return true;
}
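// For example, parsing "transport=dt_socket,address=8000,server=y,suspend=n" leaves
// gJdwpOptions.transport == JDWP::kJdwpTransportSocket, gJdwpOptions.port == 8000,
// gJdwpOptions.server == true and gJdwpOptions.suspend == false.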

void Dbg::StartJdwp() {
  if (!gJdwpAllowed || !IsJdwpConfigured()) {
    // No JDWP for you!
    return;
  }

  CHECK(gRegistry == nullptr);
  gRegistry = new ObjectRegistry;

  // Init JDWP if the debugger is enabled. This may connect out to a
  // debugger, passively listen for a debugger, or block waiting for a
  // debugger.
  gJdwpState = JDWP::JdwpState::Create(&gJdwpOptions);
  if (gJdwpState == nullptr) {
    // We probably failed because some other process has the port already, which means that
    // if we don't abort the user is likely to think they're talking to us when they're actually
    // talking to that other process.
    LOG(FATAL) << "Debugger thread failed to initialize";
  }

  // If a debugger has already attached, send the "welcome" message.
  // This may cause us to suspend all threads.
  if (gJdwpState->IsActive()) {
    ScopedObjectAccess soa(Thread::Current());
    if (!gJdwpState->PostVMStart()) {
      LOG(WARNING) << "Failed to post 'start' message to debugger";
    }
  }
}

void Dbg::StopJdwp() {
  // Post VM_DEATH event before the JDWP connection is closed (either by the JDWP thread or the
  // destruction of gJdwpState).
  if (gJdwpState != nullptr && gJdwpState->IsActive()) {
    gJdwpState->PostVMDeath();
  }
  // Prevent the JDWP thread from processing JDWP incoming packets after we close the connection.
  Disposed();
  delete gJdwpState;
  gJdwpState = nullptr;
  delete gRegistry;
  gRegistry = nullptr;
}

void Dbg::GcDidFinish() {
  if (gDdmHpifWhen != HPIF_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Sending heap info to DDM";
    DdmSendHeapInfo(gDdmHpifWhen);
  }
  if (gDdmHpsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Dumping heap to DDM";
    DdmSendHeapSegments(false);
  }
  if (gDdmNhsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Dumping native heap to DDM";
    DdmSendHeapSegments(true);
  }
}

void Dbg::SetJdwpAllowed(bool allowed) {
  gJdwpAllowed = allowed;
}

DebugInvokeReq* Dbg::GetInvokeReq() {
  return Thread::Current()->GetInvokeReq();
}

Thread* Dbg::GetDebugThread() {
  return (gJdwpState != nullptr) ? gJdwpState->GetDebugThread() : nullptr;
}

void Dbg::ClearWaitForEventThread() {
  gJdwpState->ClearWaitForEventThread();
}

void Dbg::Connected() {
  CHECK(!gDebuggerConnected);
  VLOG(jdwp) << "JDWP has attached";
  gDebuggerConnected = true;
  gDisposed = false;
}

void Dbg::Disposed() {
  gDisposed = true;
}

bool Dbg::IsDisposed() {
  return gDisposed;
}

void Dbg::GoActive() {
  // Enable all debugging features, including scans for breakpoints.
  // This is a no-op if we're already active.
  // Only called from the JDWP handler thread.
  if (gDebuggerActive) {
    return;
  }

  {
    // TODO: dalvik only warned if there were breakpoints left over. clear in Dbg::Disconnected?
    ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
    CHECK_EQ(gBreakpoints.size(), 0U);
  }

  {
    MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
    CHECK_EQ(deoptimization_requests_.size(), 0U);
    CHECK_EQ(full_deoptimization_event_count_, 0U);
    CHECK_EQ(delayed_full_undeoptimization_count_, 0U);
    CHECK_EQ(dex_pc_change_event_ref_count_, 0U);
    CHECK_EQ(method_enter_event_ref_count_, 0U);
    CHECK_EQ(method_exit_event_ref_count_, 0U);
    CHECK_EQ(field_read_event_ref_count_, 0U);
    CHECK_EQ(field_write_event_ref_count_, 0U);
    CHECK_EQ(exception_catch_event_ref_count_, 0U);
  }

  Runtime* runtime = Runtime::Current();
  runtime->GetThreadList()->SuspendAll();
  Thread* self = Thread::Current();
  ThreadState old_state = self->SetStateUnsafe(kRunnable);
  CHECK_NE(old_state, kRunnable);
  runtime->GetInstrumentation()->EnableDeoptimization();
  instrumentation_events_ = 0;
  gDebuggerActive = true;
  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
  runtime->GetThreadList()->ResumeAll();

  LOG(INFO) << "Debugger is active";
}

void Dbg::Disconnected() {
  CHECK(gDebuggerConnected);

  LOG(INFO) << "Debugger is no longer active";

  // Suspend all threads and exclusively acquire the mutator lock. Set the state of the thread
  // to kRunnable to avoid scoped object access transitions. Remove the debugger as a listener
  // and clear the object registry.
  Runtime* runtime = Runtime::Current();
  runtime->GetThreadList()->SuspendAll();
  Thread* self = Thread::Current();
  ThreadState old_state = self->SetStateUnsafe(kRunnable);

  // Debugger may not be active at this point.
  if (gDebuggerActive) {
    {
      // Since we're going to disable deoptimization, we clear the deoptimization requests queue.
      // This prevents us from having any pending deoptimization request when the debugger attaches
      // to us again while no event has been requested yet.
      MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
      deoptimization_requests_.clear();
      full_deoptimization_event_count_ = 0U;
      delayed_full_undeoptimization_count_ = 0U;
    }
    if (instrumentation_events_ != 0) {
      runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener,
                                                    instrumentation_events_);
      instrumentation_events_ = 0;
    }
    runtime->GetInstrumentation()->DisableDeoptimization();
    gDebuggerActive = false;
  }
  gRegistry->Clear();
  gDebuggerConnected = false;
  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
  runtime->GetThreadList()->ResumeAll();
}

bool Dbg::IsDebuggerActive() {
  return gDebuggerActive;
}

bool Dbg::IsJdwpConfigured() {
  return gJdwpConfigured;
}

int64_t Dbg::LastDebuggerActivity() {
  return gJdwpState->LastDebuggerActivity();
}

void Dbg::UndoDebuggerSuspensions() {
  Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
}

std::string Dbg::GetClassName(JDWP::RefTypeId class_id) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(class_id, &error);
  if (o == nullptr) {
    if (error == JDWP::ERR_NONE) {
      return "NULL";
    } else {
      return StringPrintf("invalid object %p", reinterpret_cast<void*>(class_id));
    }
  }
  if (!o->IsClass()) {
    return StringPrintf("non-class %p", o);  // This is only used for debugging output anyway.
  }
  std::string temp;
  return DescriptorToName(o->AsClass()->GetDescriptor(&temp));
}

JDWP::JdwpError Dbg::GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId* class_object_id) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(id, &status);
  if (c == nullptr) {
    *class_object_id = 0;
    return status;
  }
  *class_object_id = gRegistry->Add(c);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId* superclass_id) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(id, &status);
  if (c == nullptr) {
    *superclass_id = 0;
    return status;
  }
  if (c->IsInterface()) {
    // http://code.google.com/p/android/issues/detail?id=20856
    *superclass_id = 0;
  } else {
    *superclass_id = gRegistry->Add(c->GetSuperClass());
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(id, &error);
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  expandBufAddObjectId(pReply, gRegistry->Add(o->GetClass()->GetClassLoader()));
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(id, &error);
  if (c == nullptr) {
    return error;
  }

  uint32_t access_flags = c->GetAccessFlags() & kAccJavaFlagsMask;

  // Set ACC_SUPER. Dex files don't contain this flag but only classes are supposed to have it set,
  // not interfaces.
  // Class.getModifiers doesn't return it, but JDWP does, so we set it here.
  if ((access_flags & kAccInterface) == 0) {
    access_flags |= kAccSuper;
  }

  expandBufAdd4BE(pReply, access_flags);

  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }

  // Ensure all threads are suspended while we read objects' lock words.
  Thread* self = Thread::Current();
  CHECK_EQ(self->GetState(), kRunnable);
  self->TransitionFromRunnableToSuspended(kSuspended);
  Runtime::Current()->GetThreadList()->SuspendAll();

  MonitorInfo monitor_info(o);

  Runtime::Current()->GetThreadList()->ResumeAll();
  self->TransitionFromSuspendedToRunnable();

  if (monitor_info.owner_ != nullptr) {
    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.owner_->GetPeer()));
  } else {
    expandBufAddObjectId(reply, gRegistry->Add(nullptr));
  }
  expandBufAdd4BE(reply, monitor_info.entry_count_);
  expandBufAdd4BE(reply, monitor_info.waiters_.size());
  for (size_t i = 0; i < monitor_info.waiters_.size(); ++i) {
    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.waiters_[i]->GetPeer()));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetOwnedMonitors(JDWP::ObjectId thread_id,
                                      std::vector<JDWP::ObjectId>* monitors,
                                      std::vector<uint32_t>* stack_depths) {
  struct OwnedMonitorVisitor : public StackVisitor {
    OwnedMonitorVisitor(Thread* thread, Context* context,
                        std::vector<JDWP::ObjectId>* monitor_vector,
                        std::vector<uint32_t>* stack_depth_vector)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread, context), current_stack_depth(0),
        monitors(monitor_vector), stack_depths(stack_depth_vector) {}

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
      if (!GetMethod()->IsRuntimeMethod()) {
        Monitor::VisitLocks(this, AppendOwnedMonitors, this);
        ++current_stack_depth;
      }
      return true;
    }

    static void AppendOwnedMonitors(mirror::Object* owned_monitor, void* arg)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      OwnedMonitorVisitor* visitor = reinterpret_cast<OwnedMonitorVisitor*>(arg);
      visitor->monitors->push_back(gRegistry->Add(owned_monitor));
      visitor->stack_depths->push_back(visitor->current_stack_depth);
    }

    size_t current_stack_depth;
    std::vector<JDWP::ObjectId>* const monitors;
    std::vector<uint32_t>* const stack_depths;
  };

  ScopedObjectAccessUnchecked soa(Thread::Current());
  Thread* thread;
  {
    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
    JDWP::JdwpError error;
    thread = DecodeThread(soa, thread_id, &error);
    if (thread == nullptr) {
      return error;
    }
    if (!IsSuspendedForDebugger(soa, thread)) {
      return JDWP::ERR_THREAD_NOT_SUSPENDED;
    }
  }
  std::unique_ptr<Context> context(Context::Create());
  OwnedMonitorVisitor visitor(thread, context.get(), monitors, stack_depths);
  visitor.WalkStack();
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id,
                                         JDWP::ObjectId* contended_monitor) {
  mirror::Object* contended_monitor_obj;
  ScopedObjectAccessUnchecked soa(Thread::Current());
  *contended_monitor = 0;
  {
    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
    JDWP::JdwpError error;
    Thread* thread = DecodeThread(soa, thread_id, &error);
    if (thread == nullptr) {
      return error;
    }
    if (!IsSuspendedForDebugger(soa, thread)) {
      return JDWP::ERR_THREAD_NOT_SUSPENDED;
    }
    contended_monitor_obj = Monitor::GetContendedMonitor(thread);
  }
  // Add() requires that the thread_list_lock_ not be held, to avoid a lock level violation.
  *contended_monitor = gRegistry->Add(contended_monitor_obj);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
                                       std::vector<uint64_t>* counts) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  heap->CollectGarbage(false);
  std::vector<mirror::Class*> classes;
  counts->clear();
  for (size_t i = 0; i < class_ids.size(); ++i) {
    JDWP::JdwpError error;
    mirror::Class* c = DecodeClass(class_ids[i], &error);
    if (c == nullptr) {
      return error;
    }
    classes.push_back(c);
    counts->push_back(0);
  }
  heap->CountInstances(classes, false, &(*counts)[0]);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count,
                                  std::vector<JDWP::ObjectId>* instances) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  // We only want reachable instances, so do a GC.
  heap->CollectGarbage(false);
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }
  std::vector<mirror::Object*> raw_instances;
  Runtime::Current()->GetHeap()->GetInstances(c, max_count, raw_instances);
  for (size_t i = 0; i < raw_instances.size(); ++i) {
    instances->push_back(gRegistry->Add(raw_instances[i]));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
                                         std::vector<JDWP::ObjectId>* referring_objects) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  heap->CollectGarbage(false);
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  std::vector<mirror::Object*> raw_instances;
  heap->GetReferringObjects(o, max_count, raw_instances);
  for (size_t i = 0; i < raw_instances.size(); ++i) {
    referring_objects->push_back(gRegistry->Add(raw_instances[i]));
  }
  return JDWP::ERR_NONE;
}

Ian Rogersc0542af2014-09-03 16:16:56 -07001067JDWP::JdwpError Dbg::DisableCollection(JDWP::ObjectId object_id) {
1068 JDWP::JdwpError error;
1069 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
1070 if (o == nullptr) {
Sebastien Hertze96060a2013-12-11 12:06:28 +01001071 return JDWP::ERR_INVALID_OBJECT;
1072 }
Elliott Hughes64f574f2013-02-20 14:57:12 -08001073 gRegistry->DisableCollection(object_id);
1074 return JDWP::ERR_NONE;
1075}
1076
Ian Rogersc0542af2014-09-03 16:16:56 -07001077JDWP::JdwpError Dbg::EnableCollection(JDWP::ObjectId object_id) {
1078 JDWP::JdwpError error;
1079 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
Sebastien Hertze96060a2013-12-11 12:06:28 +01001080  // Unlike DisableCollection, the JDWP specs do not state that an invalid object causes an error.
1081  // The RI also ignores these cases and never returns an error. However, it's not obvious why this
1082  // command should behave differently from the DisableCollection and IsCollected commands, so let's
1083  // be more strict and return an error if this happens.
Ian Rogersc0542af2014-09-03 16:16:56 -07001084 if (o == nullptr) {
Sebastien Hertze96060a2013-12-11 12:06:28 +01001085 return JDWP::ERR_INVALID_OBJECT;
1086 }
Elliott Hughes64f574f2013-02-20 14:57:12 -08001087 gRegistry->EnableCollection(object_id);
1088 return JDWP::ERR_NONE;
1089}
1090
Ian Rogersc0542af2014-09-03 16:16:56 -07001091JDWP::JdwpError Dbg::IsCollected(JDWP::ObjectId object_id, bool* is_collected) {
1092 *is_collected = true;
Sebastien Hertz65637eb2014-01-10 17:40:02 +01001093 if (object_id == 0) {
1094 // Null object id is invalid.
Sebastien Hertze96060a2013-12-11 12:06:28 +01001095 return JDWP::ERR_INVALID_OBJECT;
1096 }
Sebastien Hertz65637eb2014-01-10 17:40:02 +01001097  // The JDWP specs state that an INVALID_OBJECT error is returned if the object ID is not valid.
1098  // However, the RI seems to ignore this and assumes the object has been collected.
Ian Rogersc0542af2014-09-03 16:16:56 -07001099 JDWP::JdwpError error;
1100 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
1101 if (o != nullptr) {
1102 *is_collected = gRegistry->IsCollected(object_id);
Sebastien Hertz65637eb2014-01-10 17:40:02 +01001103 }
Elliott Hughes64f574f2013-02-20 14:57:12 -08001104 return JDWP::ERR_NONE;
1105}
1106
Ian Rogersc0542af2014-09-03 16:16:56 -07001107void Dbg::DisposeObject(JDWP::ObjectId object_id, uint32_t reference_count) {
Elliott Hughes64f574f2013-02-20 14:57:12 -08001108 gRegistry->DisposeObject(object_id, reference_count);
1109}
1110
Sebastien Hertz4d8fd492014-03-28 16:29:41 +01001111static JDWP::JdwpTypeTag GetTypeTag(mirror::Class* klass)
1112 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1113 DCHECK(klass != nullptr);
1114 if (klass->IsArrayClass()) {
1115 return JDWP::TT_ARRAY;
1116 } else if (klass->IsInterface()) {
1117 return JDWP::TT_INTERFACE;
1118 } else {
1119 return JDWP::TT_CLASS;
1120 }
1121}
1122
Elliott Hughes88d63092013-01-09 09:55:54 -08001123JDWP::JdwpError Dbg::GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
Ian Rogersc0542af2014-09-03 16:16:56 -07001124 JDWP::JdwpError error;
1125 mirror::Class* c = DecodeClass(class_id, &error);
1126 if (c == nullptr) {
1127 return error;
Elliott Hughes7b3cdfc2011-12-08 21:28:17 -08001128 }
Elliott Hughes436e3722012-02-17 20:01:47 -08001129
Sebastien Hertz4d8fd492014-03-28 16:29:41 +01001130 JDWP::JdwpTypeTag type_tag = GetTypeTag(c);
1131 expandBufAdd1(pReply, type_tag);
Elliott Hughes88d63092013-01-09 09:55:54 -08001132 expandBufAddRefTypeId(pReply, class_id);
Elliott Hughes436e3722012-02-17 20:01:47 -08001133 return JDWP::ERR_NONE;
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001134}
1135
Ian Rogersc0542af2014-09-03 16:16:56 -07001136void Dbg::GetClassList(std::vector<JDWP::RefTypeId>* classes) {
Elliott Hughesa2155262011-11-16 16:26:58 -08001137 // Get the complete list of reference classes (i.e. all classes except
1138 // the primitive types).
1139  // Fills the 'classes' vector with the RefTypeId of each such class.
1140 struct ClassListCreator {
Ian Rogersc0542af2014-09-03 16:16:56 -07001141 explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes) : classes(classes) {
Elliott Hughes7b3cdfc2011-12-08 21:28:17 -08001142 }
1143
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001144 static bool Visit(mirror::Class* c, void* arg) {
Elliott Hughesa2155262011-11-16 16:26:58 -08001145 return reinterpret_cast<ClassListCreator*>(arg)->Visit(c);
1146 }
1147
Elliott Hughes64f574f2013-02-20 14:57:12 -08001148    // TODO: Enable annotalysis. We know the lock is held in the constructor, but the abstraction
1149    // confuses annotalysis.
1150 bool Visit(mirror::Class* c) NO_THREAD_SAFETY_ANALYSIS {
Elliott Hughesa2155262011-11-16 16:26:58 -08001151 if (!c->IsPrimitive()) {
Ian Rogersc0542af2014-09-03 16:16:56 -07001152 classes->push_back(gRegistry->AddRefType(c));
Elliott Hughesa2155262011-11-16 16:26:58 -08001153 }
1154 return true;
1155 }
1156
Ian Rogersc0542af2014-09-03 16:16:56 -07001157 std::vector<JDWP::RefTypeId>* const classes;
Elliott Hughesa2155262011-11-16 16:26:58 -08001158 };
1159
Elliott Hughes7b3cdfc2011-12-08 21:28:17 -08001160 ClassListCreator clc(classes);
Sebastien Hertz4537c412014-08-28 14:41:50 +02001161 Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(ClassListCreator::Visit,
1162 &clc);
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001163}
1164
Ian Rogers1ff3c982014-08-12 02:30:58 -07001165JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag,
1166 uint32_t* pStatus, std::string* pDescriptor) {
Ian Rogersc0542af2014-09-03 16:16:56 -07001167 JDWP::JdwpError error;
1168 mirror::Class* c = DecodeClass(class_id, &error);
1169 if (c == nullptr) {
1170 return error;
Elliott Hughes7b3cdfc2011-12-08 21:28:17 -08001171 }
1172
Elliott Hughesa2155262011-11-16 16:26:58 -08001173 if (c->IsArrayClass()) {
1174 *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
1175 *pTypeTag = JDWP::TT_ARRAY;
1176 } else {
1177 if (c->IsErroneous()) {
1178 *pStatus = JDWP::CS_ERROR;
1179 } else {
1180 *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED | JDWP::CS_INITIALIZED;
1181 }
1182 *pTypeTag = c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS;
1183 }
1184
Ian Rogersc0542af2014-09-03 16:16:56 -07001185 if (pDescriptor != nullptr) {
Ian Rogers1ff3c982014-08-12 02:30:58 -07001186 std::string temp;
1187 *pDescriptor = c->GetDescriptor(&temp);
Elliott Hughesa2155262011-11-16 16:26:58 -08001188 }
Elliott Hughes436e3722012-02-17 20:01:47 -08001189 return JDWP::ERR_NONE;
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001190}
1191
Ian Rogersc0542af2014-09-03 16:16:56 -07001192void Dbg::FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>* ids) {
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001193 std::vector<mirror::Class*> classes;
Elliott Hughes6fa602d2011-12-02 17:54:25 -08001194 Runtime::Current()->GetClassLinker()->LookupClasses(descriptor, classes);
Ian Rogersc0542af2014-09-03 16:16:56 -07001195 ids->clear();
Elliott Hughes6fa602d2011-12-02 17:54:25 -08001196 for (size_t i = 0; i < classes.size(); ++i) {
Ian Rogersc0542af2014-09-03 16:16:56 -07001197 ids->push_back(gRegistry->Add(classes[i]));
Elliott Hughes6fa602d2011-12-02 17:54:25 -08001198 }
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001199}
1200
Ian Rogersc0542af2014-09-03 16:16:56 -07001201JDWP::JdwpError Dbg::GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply) {
1202 JDWP::JdwpError error;
1203 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
1204 if (o == nullptr) {
Elliott Hughes2435a572012-02-17 16:07:41 -08001205 return JDWP::ERR_INVALID_OBJECT;
Elliott Hughes499c5132011-11-17 14:55:11 -08001206 }
Elliott Hughes2435a572012-02-17 16:07:41 -08001207
Sebastien Hertz4d8fd492014-03-28 16:29:41 +01001208 JDWP::JdwpTypeTag type_tag = GetTypeTag(o->GetClass());
Elliott Hughes64f574f2013-02-20 14:57:12 -08001209 JDWP::RefTypeId type_id = gRegistry->AddRefType(o->GetClass());
Elliott Hughes2435a572012-02-17 16:07:41 -08001210
1211 expandBufAdd1(pReply, type_tag);
1212 expandBufAddRefTypeId(pReply, type_id);
1213
1214 return JDWP::ERR_NONE;
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001215}
1216
Ian Rogersfc0e94b2013-09-23 23:51:32 -07001217JDWP::JdwpError Dbg::GetSignature(JDWP::RefTypeId class_id, std::string* signature) {
Ian Rogersc0542af2014-09-03 16:16:56 -07001218 JDWP::JdwpError error;
1219 mirror::Class* c = DecodeClass(class_id, &error);
1220 if (c == nullptr) {
1221 return error;
Elliott Hughes7b3cdfc2011-12-08 21:28:17 -08001222 }
Ian Rogers1ff3c982014-08-12 02:30:58 -07001223 std::string temp;
1224 *signature = c->GetDescriptor(&temp);
Elliott Hughes1fe7afb2012-02-13 17:23:03 -08001225 return JDWP::ERR_NONE;
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001226}
1227
Ian Rogersc0542af2014-09-03 16:16:56 -07001228JDWP::JdwpError Dbg::GetSourceFile(JDWP::RefTypeId class_id, std::string* result) {
1229 JDWP::JdwpError error;
1230 mirror::Class* c = DecodeClass(class_id, &error);
Sebastien Hertz4206eb52014-06-05 10:15:45 +02001231 if (c == nullptr) {
Ian Rogersc0542af2014-09-03 16:16:56 -07001232 return error;
Elliott Hughes7b3cdfc2011-12-08 21:28:17 -08001233 }
Sebastien Hertz4206eb52014-06-05 10:15:45 +02001234 const char* source_file = c->GetSourceFile();
1235 if (source_file == nullptr) {
Sebastien Hertzb7054ba2014-03-13 11:52:31 +01001236 return JDWP::ERR_ABSENT_INFORMATION;
1237 }
Ian Rogersc0542af2014-09-03 16:16:56 -07001238 *result = source_file;
Elliott Hughes436e3722012-02-17 20:01:47 -08001239 return JDWP::ERR_NONE;
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001240}
1241
Ian Rogersc0542af2014-09-03 16:16:56 -07001242JDWP::JdwpError Dbg::GetObjectTag(JDWP::ObjectId object_id, uint8_t* tag) {
Ian Rogers98379392014-02-24 16:53:16 -08001243 ScopedObjectAccessUnchecked soa(Thread::Current());
Ian Rogersc0542af2014-09-03 16:16:56 -07001244 JDWP::JdwpError error;
1245 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
1246 if (error != JDWP::ERR_NONE) {
1247 *tag = JDWP::JT_VOID;
1248 return error;
Elliott Hughes546b9862012-06-20 16:06:13 -07001249 }
Ian Rogersc0542af2014-09-03 16:16:56 -07001250 *tag = TagFromObject(soa, o);
Elliott Hughes546b9862012-06-20 16:06:13 -07001251 return JDWP::ERR_NONE;
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001252}
1253
Elliott Hughesaed4be92011-12-02 16:16:23 -08001254size_t Dbg::GetTagWidth(JDWP::JdwpTag tag) {
Elliott Hughesdbb40792011-11-18 17:05:22 -08001255 switch (tag) {
1256 case JDWP::JT_VOID:
1257 return 0;
1258 case JDWP::JT_BYTE:
1259 case JDWP::JT_BOOLEAN:
1260 return 1;
1261 case JDWP::JT_CHAR:
1262 case JDWP::JT_SHORT:
1263 return 2;
1264 case JDWP::JT_FLOAT:
1265 case JDWP::JT_INT:
1266 return 4;
1267 case JDWP::JT_ARRAY:
1268 case JDWP::JT_OBJECT:
1269 case JDWP::JT_STRING:
1270 case JDWP::JT_THREAD:
1271 case JDWP::JT_THREAD_GROUP:
1272 case JDWP::JT_CLASS_LOADER:
1273 case JDWP::JT_CLASS_OBJECT:
1274 return sizeof(JDWP::ObjectId);
1275 case JDWP::JT_DOUBLE:
1276 case JDWP::JT_LONG:
1277 return 8;
1278 default:
Elliott Hughes3d30d9b2011-12-07 17:35:48 -08001279 LOG(FATAL) << "Unknown tag " << tag;
Elliott Hughesdbb40792011-11-18 17:05:22 -08001280 return -1;
1281 }
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001282}
1283
Ian Rogersc0542af2014-09-03 16:16:56 -07001284JDWP::JdwpError Dbg::GetArrayLength(JDWP::ObjectId array_id, int32_t* length) {
1285 JDWP::JdwpError error;
1286 mirror::Array* a = DecodeNonNullArray(array_id, &error);
1287 if (a == nullptr) {
1288 return error;
Elliott Hughes24437992011-11-30 14:49:33 -08001289 }
Ian Rogersc0542af2014-09-03 16:16:56 -07001290 *length = a->GetLength();
Elliott Hughes3d1ca6d2012-02-13 15:43:19 -08001291 return JDWP::ERR_NONE;
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001292}
1293
Elliott Hughes88d63092013-01-09 09:55:54 -08001294JDWP::JdwpError Dbg::OutputArray(JDWP::ObjectId array_id, int offset, int count, JDWP::ExpandBuf* pReply) {
Ian Rogersc0542af2014-09-03 16:16:56 -07001295 JDWP::JdwpError error;
1296 mirror::Array* a = DecodeNonNullArray(array_id, &error);
Ian Rogers98379392014-02-24 16:53:16 -08001297 if (a == nullptr) {
Ian Rogersc0542af2014-09-03 16:16:56 -07001298 return error;
Elliott Hughes3d1ca6d2012-02-13 15:43:19 -08001299 }
Elliott Hughes24437992011-11-30 14:49:33 -08001300
1301 if (offset < 0 || count < 0 || offset > a->GetLength() || a->GetLength() - offset < count) {
1302 LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
Elliott Hughes3d1ca6d2012-02-13 15:43:19 -08001303 return JDWP::ERR_INVALID_LENGTH;
Elliott Hughes24437992011-11-30 14:49:33 -08001304 }
Ian Rogers1ff3c982014-08-12 02:30:58 -07001305 JDWP::JdwpTag element_tag = BasicTagFromClass(a->GetClass()->GetComponentType());
1306 expandBufAdd1(pReply, element_tag);
Elliott Hughes3d1ca6d2012-02-13 15:43:19 -08001307 expandBufAdd4BE(pReply, count);
1308
Ian Rogers1ff3c982014-08-12 02:30:58 -07001309 if (IsPrimitiveTag(element_tag)) {
1310 size_t width = GetTagWidth(element_tag);
Elliott Hughes24437992011-11-30 14:49:33 -08001311 uint8_t* dst = expandBufAddSpace(pReply, count * width);
1312 if (width == 8) {
Ian Rogersef7d42f2014-01-06 12:55:46 -08001313 const uint64_t* src8 = reinterpret_cast<uint64_t*>(a->GetRawData(sizeof(uint64_t), 0));
Elliott Hughes24437992011-11-30 14:49:33 -08001314 for (int i = 0; i < count; ++i) JDWP::Write8BE(&dst, src8[offset + i]);
1315 } else if (width == 4) {
Ian Rogersef7d42f2014-01-06 12:55:46 -08001316 const uint32_t* src4 = reinterpret_cast<uint32_t*>(a->GetRawData(sizeof(uint32_t), 0));
Elliott Hughes24437992011-11-30 14:49:33 -08001317 for (int i = 0; i < count; ++i) JDWP::Write4BE(&dst, src4[offset + i]);
1318 } else if (width == 2) {
Ian Rogersef7d42f2014-01-06 12:55:46 -08001319 const uint16_t* src2 = reinterpret_cast<uint16_t*>(a->GetRawData(sizeof(uint16_t), 0));
Elliott Hughes24437992011-11-30 14:49:33 -08001320 for (int i = 0; i < count; ++i) JDWP::Write2BE(&dst, src2[offset + i]);
1321 } else {
Ian Rogersef7d42f2014-01-06 12:55:46 -08001322 const uint8_t* src = reinterpret_cast<uint8_t*>(a->GetRawData(sizeof(uint8_t), 0));
Elliott Hughes24437992011-11-30 14:49:33 -08001323 memcpy(dst, &src[offset * width], count * width);
1324 }
1325 } else {
Ian Rogers98379392014-02-24 16:53:16 -08001326 ScopedObjectAccessUnchecked soa(Thread::Current());
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001327 mirror::ObjectArray<mirror::Object>* oa = a->AsObjectArray<mirror::Object>();
Elliott Hughes24437992011-11-30 14:49:33 -08001328 for (int i = 0; i < count; ++i) {
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001329 mirror::Object* element = oa->Get(offset + i);
Ian Rogers98379392014-02-24 16:53:16 -08001330 JDWP::JdwpTag specific_tag = (element != nullptr) ? TagFromObject(soa, element)
Ian Rogers1ff3c982014-08-12 02:30:58 -07001331 : element_tag;
Elliott Hughes24437992011-11-30 14:49:33 -08001332 expandBufAdd1(pReply, specific_tag);
1333 expandBufAddObjectId(pReply, gRegistry->Add(element));
1334 }
1335 }
1336
Elliott Hughes3d1ca6d2012-02-13 15:43:19 -08001337 return JDWP::ERR_NONE;
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001338}
1339
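// Reads 'count' primitive values from the JDWP request and stores them into array 'a' starting at
// element 'offset'. T selects the element width (e.g. uint16_t for char/short arrays).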
Ian Rogersef7d42f2014-01-06 12:55:46 -08001340template <typename T>
Ian Rogersc0542af2014-09-03 16:16:56 -07001341static void CopyArrayData(mirror::Array* a, JDWP::Request* src, int offset, int count)
Ian Rogersef7d42f2014-01-06 12:55:46 -08001342 NO_THREAD_SAFETY_ANALYSIS {
1343 // TODO: fix when annotalysis correctly handles non-member functions.
Elliott Hughes4b9702c2013-02-20 18:13:24 -08001344 DCHECK(a->GetClass()->IsPrimitiveArray());
1345
Ian Rogersef7d42f2014-01-06 12:55:46 -08001346 T* dst = reinterpret_cast<T*>(a->GetRawData(sizeof(T), offset));
Elliott Hughes4b9702c2013-02-20 18:13:24 -08001347 for (int i = 0; i < count; ++i) {
Ian Rogersc0542af2014-09-03 16:16:56 -07001348 *dst++ = src->ReadValue(sizeof(T));
Elliott Hughes4b9702c2013-02-20 18:13:24 -08001349 }
1350}
1351
Elliott Hughes88d63092013-01-09 09:55:54 -08001352JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId array_id, int offset, int count,
Ian Rogersc0542af2014-09-03 16:16:56 -07001353 JDWP::Request* request) {
1354 JDWP::JdwpError error;
1355 mirror::Array* dst = DecodeNonNullArray(array_id, &error);
1356 if (dst == nullptr) {
1357 return error;
Elliott Hughes3d1ca6d2012-02-13 15:43:19 -08001358 }
Elliott Hughesf03b8f62011-12-02 14:26:25 -08001359
Elliott Hughes4b9702c2013-02-20 18:13:24 -08001360 if (offset < 0 || count < 0 || offset > dst->GetLength() || dst->GetLength() - offset < count) {
Elliott Hughesf03b8f62011-12-02 14:26:25 -08001361 LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
Elliott Hughes3d1ca6d2012-02-13 15:43:19 -08001362 return JDWP::ERR_INVALID_LENGTH;
Elliott Hughesf03b8f62011-12-02 14:26:25 -08001363 }
Ian Rogers1ff3c982014-08-12 02:30:58 -07001364 JDWP::JdwpTag element_tag = BasicTagFromClass(dst->GetClass()->GetComponentType());
Elliott Hughesf03b8f62011-12-02 14:26:25 -08001365
Ian Rogers1ff3c982014-08-12 02:30:58 -07001366 if (IsPrimitiveTag(element_tag)) {
1367 size_t width = GetTagWidth(element_tag);
Elliott Hughesf03b8f62011-12-02 14:26:25 -08001368 if (width == 8) {
Elliott Hughes4b9702c2013-02-20 18:13:24 -08001369 CopyArrayData<uint64_t>(dst, request, offset, count);
Elliott Hughesf03b8f62011-12-02 14:26:25 -08001370 } else if (width == 4) {
Elliott Hughes4b9702c2013-02-20 18:13:24 -08001371 CopyArrayData<uint32_t>(dst, request, offset, count);
Elliott Hughesf03b8f62011-12-02 14:26:25 -08001372 } else if (width == 2) {
Elliott Hughes4b9702c2013-02-20 18:13:24 -08001373 CopyArrayData<uint16_t>(dst, request, offset, count);
Elliott Hughesf03b8f62011-12-02 14:26:25 -08001374 } else {
Elliott Hughes4b9702c2013-02-20 18:13:24 -08001375 CopyArrayData<uint8_t>(dst, request, offset, count);
Elliott Hughesf03b8f62011-12-02 14:26:25 -08001376 }
1377 } else {
Elliott Hughes4b9702c2013-02-20 18:13:24 -08001378 mirror::ObjectArray<mirror::Object>* oa = dst->AsObjectArray<mirror::Object>();
Elliott Hughesf03b8f62011-12-02 14:26:25 -08001379 for (int i = 0; i < count; ++i) {
Ian Rogersc0542af2014-09-03 16:16:56 -07001380 JDWP::ObjectId id = request->ReadObjectId();
1381 JDWP::JdwpError error;
1382 mirror::Object* o = gRegistry->Get<mirror::Object*>(id, &error);
1383 if (error != JDWP::ERR_NONE) {
1384 return error;
Elliott Hughes436e3722012-02-17 20:01:47 -08001385 }
Sebastien Hertzd2fe10a2014-01-15 10:20:56 +01001386 oa->Set<false>(offset + i, o);
Elliott Hughesf03b8f62011-12-02 14:26:25 -08001387 }
1388 }
1389
Elliott Hughes3d1ca6d2012-02-13 15:43:19 -08001390 return JDWP::ERR_NONE;
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001391}
1392
Elliott Hughes7b3cdfc2011-12-08 21:28:17 -08001393JDWP::ObjectId Dbg::CreateString(const std::string& str) {
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001394 return gRegistry->Add(mirror::String::AllocFromModifiedUtf8(Thread::Current(), str.c_str()));
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001395}
1396
Ian Rogersc0542af2014-09-03 16:16:56 -07001397JDWP::JdwpError Dbg::CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId* new_object) {
1398 JDWP::JdwpError error;
1399 mirror::Class* c = DecodeClass(class_id, &error);
1400 if (c == nullptr) {
1401 *new_object = 0;
1402 return error;
Elliott Hughes7b3cdfc2011-12-08 21:28:17 -08001403 }
Ian Rogersc0542af2014-09-03 16:16:56 -07001404 *new_object = gRegistry->Add(c->AllocObject(Thread::Current()));
Elliott Hughes436e3722012-02-17 20:01:47 -08001405 return JDWP::ERR_NONE;
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001406}
1407
Elliott Hughesbf13d362011-12-08 15:51:37 -08001408/*
1409 * Used by Eclipse's "Display" view to evaluate "new byte[5]" to get "(byte[]) [0, 0, 0, 0, 0]".
1410 */
Elliott Hughes88d63092013-01-09 09:55:54 -08001411JDWP::JdwpError Dbg::CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length,
Ian Rogersc0542af2014-09-03 16:16:56 -07001412 JDWP::ObjectId* new_array) {
1413 JDWP::JdwpError error;
1414 mirror::Class* c = DecodeClass(array_class_id, &error);
1415 if (c == nullptr) {
1416 *new_array = 0;
1417 return error;
Elliott Hughes7b3cdfc2011-12-08 21:28:17 -08001418 }
Ian Rogersc0542af2014-09-03 16:16:56 -07001419 *new_array = gRegistry->Add(mirror::Array::Alloc<true>(Thread::Current(), c, length,
1420 c->GetComponentSize(),
1421 Runtime::Current()->GetHeap()->GetCurrentAllocator()));
Elliott Hughes436e3722012-02-17 20:01:47 -08001422 return JDWP::ERR_NONE;
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001423}
1424
Elliott Hughes88d63092013-01-09 09:55:54 -08001425bool Dbg::MatchType(JDWP::RefTypeId instance_class_id, JDWP::RefTypeId class_id) {
Ian Rogersc0542af2014-09-03 16:16:56 -07001426 JDWP::JdwpError error;
1427 mirror::Class* c1 = DecodeClass(instance_class_id, &error);
1428 CHECK(c1 != nullptr);
1429 mirror::Class* c2 = DecodeClass(class_id, &error);
1430 CHECK(c2 != nullptr);
Sebastien Hertz123756a2013-11-27 15:49:42 +01001431 return c2->IsAssignableFrom(c1);
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001432}
1433
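// JDWP field and method ids are simply the raw ArtField*/ArtMethod* pointer values. This is only
// safe because fields and methods are non-moving, which the CHECKs below assert.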
Brian Carlstromea46f952013-07-30 01:26:50 -07001434static JDWP::FieldId ToFieldId(const mirror::ArtField* f)
Ian Rogersb726dcb2012-09-05 08:57:23 -07001435 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07001436 CHECK(!kMovingFields);
Elliott Hughes03181a82011-11-17 17:22:21 -08001437 return static_cast<JDWP::FieldId>(reinterpret_cast<uintptr_t>(f));
Elliott Hughes03181a82011-11-17 17:22:21 -08001438}
1439
Brian Carlstromea46f952013-07-30 01:26:50 -07001440static JDWP::MethodId ToMethodId(const mirror::ArtMethod* m)
Ian Rogersb726dcb2012-09-05 08:57:23 -07001441 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07001442 CHECK(!kMovingMethods);
Elliott Hughes03181a82011-11-17 17:22:21 -08001443 return static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(m));
Elliott Hughes03181a82011-11-17 17:22:21 -08001444}
1445
Brian Carlstromea46f952013-07-30 01:26:50 -07001446static mirror::ArtField* FromFieldId(JDWP::FieldId fid)
Ian Rogersb726dcb2012-09-05 08:57:23 -07001447 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07001448 CHECK(!kMovingFields);
Brian Carlstromea46f952013-07-30 01:26:50 -07001449 return reinterpret_cast<mirror::ArtField*>(static_cast<uintptr_t>(fid));
Elliott Hughesaed4be92011-12-02 16:16:23 -08001450}
1451
Brian Carlstromea46f952013-07-30 01:26:50 -07001452static mirror::ArtMethod* FromMethodId(JDWP::MethodId mid)
Ian Rogersb726dcb2012-09-05 08:57:23 -07001453 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07001454 CHECK(!kMovingMethods);
Brian Carlstromea46f952013-07-30 01:26:50 -07001455 return reinterpret_cast<mirror::ArtMethod*>(static_cast<uintptr_t>(mid));
Elliott Hughes03181a82011-11-17 17:22:21 -08001456}
1457
Ian Rogersc0542af2014-09-03 16:16:56 -07001458static void SetLocation(JDWP::JdwpLocation* location, mirror::ArtMethod* m, uint32_t dex_pc)
Ian Rogersb726dcb2012-09-05 08:57:23 -07001459 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Ian Rogersc0542af2014-09-03 16:16:56 -07001460 if (m == nullptr) {
Elliott Hughes91bf6cd2012-02-14 17:27:48 -08001461    memset(location, 0, sizeof(*location));
1462 } else {
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001463 mirror::Class* c = m->GetDeclaringClass();
Ian Rogersc0542af2014-09-03 16:16:56 -07001464 location->type_tag = GetTypeTag(c);
1465 location->class_id = gRegistry->AddRefType(c);
1466 location->method_id = ToMethodId(m);
1467 location->dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint64_t>(-1) : dex_pc;
Elliott Hughes91bf6cd2012-02-14 17:27:48 -08001468 }
Elliott Hughesd07986f2011-12-06 18:27:45 -08001469}
1470
Ian Rogersc0542af2014-09-03 16:16:56 -07001471std::string Dbg::GetMethodName(JDWP::MethodId method_id) {
Brian Carlstromea46f952013-07-30 01:26:50 -07001472 mirror::ArtMethod* m = FromMethodId(method_id);
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07001473 return m->GetName();
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001474}
1475
Ian Rogersc0542af2014-09-03 16:16:56 -07001476std::string Dbg::GetFieldName(JDWP::FieldId field_id) {
Mathieu Chartier61c5ebc2014-06-05 17:42:53 -07001477 return FromFieldId(field_id)->GetName();
Elliott Hughesa96836a2013-01-17 12:27:49 -08001478}
1479
Elliott Hughesa2e54f62011-11-17 13:01:30 -08001480/*
1481 * Augment the access flags for synthetic methods and fields by setting
1482 * the (as described by the spec) "0xf0000000 bit". Also, strip out any
1483 * flags not specified by the Java programming language.
1484 */
1485static uint32_t MangleAccessFlags(uint32_t accessFlags) {
1486 accessFlags &= kAccJavaFlagsMask;
1487 if ((accessFlags & kAccSynthetic) != 0) {
1488 accessFlags |= 0xf0000000;
1489 }
1490 return accessFlags;
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001491}
1492
Elliott Hughesdbb40792011-11-18 17:05:22 -08001493/*
Jeff Haob7cefc72013-11-14 14:51:09 -08001494 * Circularly shifts registers so that arguments come first. Debuggers
1495 * expect slots to begin with arguments, but dex code places them at
1496 * the end.
Elliott Hughesdbb40792011-11-18 17:05:22 -08001497 */
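// For example, with registers_size == 5 and ins_size == 2, dex locals {0, 1, 2} become debugger
// slots {2, 3, 4} and dex arguments {3, 4} become debugger slots {0, 1}.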
Jeff Haob7cefc72013-11-14 14:51:09 -08001498static uint16_t MangleSlot(uint16_t slot, mirror::ArtMethod* m)
1499 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07001500 const DexFile::CodeItem* code_item = m->GetCodeItem();
Sebastien Hertzcb19ebf2014-03-11 15:26:35 +01001501 if (code_item == nullptr) {
1502 // We should not get here for a method without code (native, proxy or abstract). Log it and
1503 // return the slot as is since all registers are arguments.
1504 LOG(WARNING) << "Trying to mangle slot for method without code " << PrettyMethod(m);
1505 return slot;
1506 }
Jeff Haob7cefc72013-11-14 14:51:09 -08001507 uint16_t ins_size = code_item->ins_size_;
1508 uint16_t locals_size = code_item->registers_size_ - ins_size;
1509 if (slot >= locals_size) {
1510 return slot - locals_size;
1511 } else {
1512 return slot + ins_size;
Elliott Hughesdbb40792011-11-18 17:05:22 -08001513 }
Elliott Hughesdbb40792011-11-18 17:05:22 -08001514}
1515
Jeff Haob7cefc72013-11-14 14:51:09 -08001516/*
1517 * Circularly shifts registers so that arguments come last. Reverts
1518 * slots to dex style argument placement.
1519 */
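// Exact inverse of MangleSlot: in the example above, debugger slots {0, 1} map back to dex
// registers {3, 4} and debugger slots {2, 3, 4} map back to dex registers {0, 1, 2}.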
Brian Carlstromea46f952013-07-30 01:26:50 -07001520static uint16_t DemangleSlot(uint16_t slot, mirror::ArtMethod* m)
Ian Rogersb726dcb2012-09-05 08:57:23 -07001521 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07001522 const DexFile::CodeItem* code_item = m->GetCodeItem();
Sebastien Hertzcb19ebf2014-03-11 15:26:35 +01001523 if (code_item == nullptr) {
1524 // We should not get here for a method without code (native, proxy or abstract). Log it and
1525 // return the slot as is since all registers are arguments.
1526 LOG(WARNING) << "Trying to demangle slot for method without code " << PrettyMethod(m);
1527 return slot;
1528 }
Jeff Haob7cefc72013-11-14 14:51:09 -08001529 uint16_t ins_size = code_item->ins_size_;
1530 uint16_t locals_size = code_item->registers_size_ - ins_size;
1531 if (slot < ins_size) {
1532 return slot + locals_size;
1533 } else {
1534 return slot - ins_size;
Elliott Hughesdbb40792011-11-18 17:05:22 -08001535 }
Elliott Hughesdbb40792011-11-18 17:05:22 -08001536}
1537
Elliott Hughes88d63092013-01-09 09:55:54 -08001538JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic, JDWP::ExpandBuf* pReply) {
Ian Rogersc0542af2014-09-03 16:16:56 -07001539 JDWP::JdwpError error;
1540 mirror::Class* c = DecodeClass(class_id, &error);
1541 if (c == nullptr) {
1542 return error;
Elliott Hughes7b3cdfc2011-12-08 21:28:17 -08001543 }
Elliott Hughesa2e54f62011-11-17 13:01:30 -08001544
1545 size_t instance_field_count = c->NumInstanceFields();
1546 size_t static_field_count = c->NumStaticFields();
1547
1548 expandBufAdd4BE(pReply, instance_field_count + static_field_count);
1549
1550 for (size_t i = 0; i < instance_field_count + static_field_count; ++i) {
Brian Carlstromea46f952013-07-30 01:26:50 -07001551 mirror::ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) : c->GetStaticField(i - instance_field_count);
Elliott Hughesa2e54f62011-11-17 13:01:30 -08001552 expandBufAddFieldId(pReply, ToFieldId(f));
Mathieu Chartier61c5ebc2014-06-05 17:42:53 -07001553 expandBufAddUtf8String(pReply, f->GetName());
1554 expandBufAddUtf8String(pReply, f->GetTypeDescriptor());
Elliott Hughesc5b734a2011-12-01 17:20:58 -08001555 if (with_generic) {
Elliott Hughesa2e54f62011-11-17 13:01:30 -08001556 static const char genericSignature[1] = "";
1557 expandBufAddUtf8String(pReply, genericSignature);
1558 }
1559 expandBufAdd4BE(pReply, MangleAccessFlags(f->GetAccessFlags()));
1560 }
Elliott Hughes436e3722012-02-17 20:01:47 -08001561 return JDWP::ERR_NONE;
Elliott Hughesa2e54f62011-11-17 13:01:30 -08001562}
1563
Elliott Hughes88d63092013-01-09 09:55:54 -08001564JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId class_id, bool with_generic,
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001565 JDWP::ExpandBuf* pReply) {
Ian Rogersc0542af2014-09-03 16:16:56 -07001566 JDWP::JdwpError error;
1567 mirror::Class* c = DecodeClass(class_id, &error);
1568 if (c == nullptr) {
1569 return error;
Elliott Hughes7b3cdfc2011-12-08 21:28:17 -08001570 }
Elliott Hughesa2e54f62011-11-17 13:01:30 -08001571
1572 size_t direct_method_count = c->NumDirectMethods();
1573 size_t virtual_method_count = c->NumVirtualMethods();
1574
1575 expandBufAdd4BE(pReply, direct_method_count + virtual_method_count);
1576
1577 for (size_t i = 0; i < direct_method_count + virtual_method_count; ++i) {
Brian Carlstromea46f952013-07-30 01:26:50 -07001578 mirror::ArtMethod* m = (i < direct_method_count) ? c->GetDirectMethod(i) : c->GetVirtualMethod(i - direct_method_count);
Elliott Hughesa2e54f62011-11-17 13:01:30 -08001579 expandBufAddMethodId(pReply, ToMethodId(m));
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07001580 expandBufAddUtf8String(pReply, m->GetName());
1581 expandBufAddUtf8String(pReply, m->GetSignature().ToString());
Elliott Hughesc5b734a2011-12-01 17:20:58 -08001582 if (with_generic) {
Elliott Hughesa2e54f62011-11-17 13:01:30 -08001583 static const char genericSignature[1] = "";
1584 expandBufAddUtf8String(pReply, genericSignature);
1585 }
1586 expandBufAdd4BE(pReply, MangleAccessFlags(m->GetAccessFlags()));
1587 }
Elliott Hughes436e3722012-02-17 20:01:47 -08001588 return JDWP::ERR_NONE;
Elliott Hughesa2e54f62011-11-17 13:01:30 -08001589}
1590
Elliott Hughes88d63092013-01-09 09:55:54 -08001591JDWP::JdwpError Dbg::OutputDeclaredInterfaces(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
Ian Rogersc0542af2014-09-03 16:16:56 -07001592 JDWP::JdwpError error;
Mathieu Chartierf8322842014-05-16 10:59:25 -07001593 Thread* self = Thread::Current();
1594 StackHandleScope<1> hs(self);
Ian Rogersc0542af2014-09-03 16:16:56 -07001595 Handle<mirror::Class> c(hs.NewHandle(DecodeClass(class_id, &error)));
Mathieu Chartierf8322842014-05-16 10:59:25 -07001596 if (c.Get() == nullptr) {
Ian Rogersc0542af2014-09-03 16:16:56 -07001597 return error;
Elliott Hughes7b3cdfc2011-12-08 21:28:17 -08001598 }
Mathieu Chartierf8322842014-05-16 10:59:25 -07001599 size_t interface_count = c->NumDirectInterfaces();
Elliott Hughesa2e54f62011-11-17 13:01:30 -08001600 expandBufAdd4BE(pReply, interface_count);
1601 for (size_t i = 0; i < interface_count; ++i) {
Mathieu Chartierf8322842014-05-16 10:59:25 -07001602 expandBufAddRefTypeId(pReply,
1603 gRegistry->AddRefType(mirror::Class::GetDirectInterface(self, c, i)));
Elliott Hughesa2e54f62011-11-17 13:01:30 -08001604 }
Elliott Hughes436e3722012-02-17 20:01:47 -08001605 return JDWP::ERR_NONE;
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001606}
1607
Ian Rogersc0542af2014-09-03 16:16:56 -07001608void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId method_id, JDWP::ExpandBuf* pReply) {
Elliott Hughes03181a82011-11-17 17:22:21 -08001609 struct DebugCallbackContext {
1610 int numItems;
1611 JDWP::ExpandBuf* pReply;
1612
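    // Called by DecodeDebugInfo once per (dex pc, line number) entry in the method's debug info.
    // Returning false keeps the decoder iterating so every entry is written to the reply.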
Elliott Hughes2435a572012-02-17 16:07:41 -08001613 static bool Callback(void* context, uint32_t address, uint32_t line_number) {
Elliott Hughes03181a82011-11-17 17:22:21 -08001614 DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
1615 expandBufAdd8BE(pContext->pReply, address);
Elliott Hughes2435a572012-02-17 16:07:41 -08001616 expandBufAdd4BE(pContext->pReply, line_number);
Elliott Hughes03181a82011-11-17 17:22:21 -08001617 pContext->numItems++;
Sebastien Hertzf2910ee2013-10-19 16:39:24 +02001618 return false;
Elliott Hughes03181a82011-11-17 17:22:21 -08001619 }
1620 };
Brian Carlstromea46f952013-07-30 01:26:50 -07001621 mirror::ArtMethod* m = FromMethodId(method_id);
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07001622 const DexFile::CodeItem* code_item = m->GetCodeItem();
Elliott Hughes03181a82011-11-17 17:22:21 -08001623 uint64_t start, end;
Sebastien Hertzcb19ebf2014-03-11 15:26:35 +01001624 if (code_item == nullptr) {
1625 DCHECK(m->IsNative() || m->IsProxyMethod());
Elliott Hughes03181a82011-11-17 17:22:21 -08001626 start = -1;
1627 end = -1;
1628 } else {
1629 start = 0;
jeffhao14f0db92012-12-14 17:50:42 -08001630    // Report the index of the last code unit as the end of the range.
Sebastien Hertzcb19ebf2014-03-11 15:26:35 +01001631 end = code_item->insns_size_in_code_units_ - 1;
Elliott Hughes03181a82011-11-17 17:22:21 -08001632 }
1633
1634 expandBufAdd8BE(pReply, start);
1635 expandBufAdd8BE(pReply, end);
1636
1637 // Add numLines later
1638 size_t numLinesOffset = expandBufGetLength(pReply);
1639 expandBufAdd4BE(pReply, 0);
1640
1641 DebugCallbackContext context;
1642 context.numItems = 0;
1643 context.pReply = pReply;
1644
Sebastien Hertzcb19ebf2014-03-11 15:26:35 +01001645 if (code_item != nullptr) {
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07001646 m->GetDexFile()->DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(),
Ian Rogersc0542af2014-09-03 16:16:56 -07001647 DebugCallbackContext::Callback, nullptr, &context);
Sebastien Hertzcb19ebf2014-03-11 15:26:35 +01001648 }
Elliott Hughes03181a82011-11-17 17:22:21 -08001649
1650 JDWP::Set4BE(expandBufGetBuffer(pReply) + numLinesOffset, context.numItems);
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001651}
1652
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07001653void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool with_generic,
1654 JDWP::ExpandBuf* pReply) {
Elliott Hughesdbb40792011-11-18 17:05:22 -08001655 struct DebugCallbackContext {
Jeff Haob7cefc72013-11-14 14:51:09 -08001656 mirror::ArtMethod* method;
Elliott Hughesdbb40792011-11-18 17:05:22 -08001657 JDWP::ExpandBuf* pReply;
Elliott Hughesc5b734a2011-12-01 17:20:58 -08001658 size_t variable_count;
1659 bool with_generic;
Elliott Hughesdbb40792011-11-18 17:05:22 -08001660
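    // Called once per local variable or parameter entry in the method's debug info; emits one
    // variable-table row (start address, name, type, optional generic signature, length, slot).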
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07001661 static void Callback(void* context, uint16_t slot, uint32_t startAddress, uint32_t endAddress,
1662 const char* name, const char* descriptor, const char* signature)
Jeff Haob7cefc72013-11-14 14:51:09 -08001663 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Elliott Hughesdbb40792011-11-18 17:05:22 -08001664 DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
1665
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07001666 VLOG(jdwp) << StringPrintf(" %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d",
1667 pContext->variable_count, startAddress, endAddress - startAddress,
1668 name, descriptor, signature, slot,
1669 MangleSlot(slot, pContext->method));
Elliott Hughesdbb40792011-11-18 17:05:22 -08001670
Jeff Haob7cefc72013-11-14 14:51:09 -08001671 slot = MangleSlot(slot, pContext->method);
Elliott Hughes68fdbd02011-11-29 19:22:47 -08001672
Elliott Hughesdbb40792011-11-18 17:05:22 -08001673 expandBufAdd8BE(pContext->pReply, startAddress);
1674 expandBufAddUtf8String(pContext->pReply, name);
1675 expandBufAddUtf8String(pContext->pReply, descriptor);
Elliott Hughesc5b734a2011-12-01 17:20:58 -08001676 if (pContext->with_generic) {
Elliott Hughesdbb40792011-11-18 17:05:22 -08001677 expandBufAddUtf8String(pContext->pReply, signature);
1678 }
1679 expandBufAdd4BE(pContext->pReply, endAddress - startAddress);
1680 expandBufAdd4BE(pContext->pReply, slot);
1681
Elliott Hughesc5b734a2011-12-01 17:20:58 -08001682 ++pContext->variable_count;
Elliott Hughesdbb40792011-11-18 17:05:22 -08001683 }
1684 };
Brian Carlstromea46f952013-07-30 01:26:50 -07001685 mirror::ArtMethod* m = FromMethodId(method_id);
Elliott Hughesdbb40792011-11-18 17:05:22 -08001686
Elliott Hughesc5b734a2011-12-01 17:20:58 -08001687 // arg_count considers doubles and longs to take 2 units.
1688 // variable_count considers everything to take 1 unit.
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07001689 std::string shorty(m->GetShorty());
Brian Carlstromea46f952013-07-30 01:26:50 -07001690 expandBufAdd4BE(pReply, mirror::ArtMethod::NumArgRegisters(shorty));
Elliott Hughesdbb40792011-11-18 17:05:22 -08001691
Elliott Hughesc5b734a2011-12-01 17:20:58 -08001692 // We don't know the total number of variables yet, so leave a blank and update it later.
1693 size_t variable_count_offset = expandBufGetLength(pReply);
Elliott Hughesdbb40792011-11-18 17:05:22 -08001694 expandBufAdd4BE(pReply, 0);
1695
1696 DebugCallbackContext context;
Jeff Haob7cefc72013-11-14 14:51:09 -08001697 context.method = m;
Elliott Hughesdbb40792011-11-18 17:05:22 -08001698 context.pReply = pReply;
Elliott Hughesc5b734a2011-12-01 17:20:58 -08001699 context.variable_count = 0;
1700 context.with_generic = with_generic;
Elliott Hughesdbb40792011-11-18 17:05:22 -08001701
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07001702 const DexFile::CodeItem* code_item = m->GetCodeItem();
Sebastien Hertzcb19ebf2014-03-11 15:26:35 +01001703 if (code_item != nullptr) {
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07001704 m->GetDexFile()->DecodeDebugInfo(
Ian Rogersc0542af2014-09-03 16:16:56 -07001705 code_item, m->IsStatic(), m->GetDexMethodIndex(), nullptr, DebugCallbackContext::Callback,
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07001706 &context);
Sebastien Hertzcb19ebf2014-03-11 15:26:35 +01001707 }
Elliott Hughesdbb40792011-11-18 17:05:22 -08001708
Elliott Hughesc5b734a2011-12-01 17:20:58 -08001709 JDWP::Set4BE(expandBufGetBuffer(pReply) + variable_count_offset, context.variable_count);
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001710}
1711
Jeff Hao579b0242013-11-18 13:16:49 -08001712void Dbg::OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value,
1713 JDWP::ExpandBuf* pReply) {
1714 mirror::ArtMethod* m = FromMethodId(method_id);
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07001715 JDWP::JdwpTag tag = BasicTagFromDescriptor(m->GetShorty());
Jeff Hao579b0242013-11-18 13:16:49 -08001716 OutputJValue(tag, return_value, pReply);
1717}
1718
Sebastien Hertz3f52eaf2014-04-04 17:50:18 +02001719void Dbg::OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value,
1720 JDWP::ExpandBuf* pReply) {
1721 mirror::ArtField* f = FromFieldId(field_id);
Mathieu Chartier61c5ebc2014-06-05 17:42:53 -07001722 JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
Sebastien Hertz3f52eaf2014-04-04 17:50:18 +02001723 OutputJValue(tag, field_value, pReply);
1724}
1725
Elliott Hughes9777ba22013-01-17 09:04:19 -08001726JDWP::JdwpError Dbg::GetBytecodes(JDWP::RefTypeId, JDWP::MethodId method_id,
Ian Rogersc0542af2014-09-03 16:16:56 -07001727 std::vector<uint8_t>* bytecodes) {
Brian Carlstromea46f952013-07-30 01:26:50 -07001728 mirror::ArtMethod* m = FromMethodId(method_id);
Ian Rogersc0542af2014-09-03 16:16:56 -07001729 if (m == nullptr) {
Elliott Hughes9777ba22013-01-17 09:04:19 -08001730 return JDWP::ERR_INVALID_METHODID;
1731 }
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07001732 const DexFile::CodeItem* code_item = m->GetCodeItem();
Elliott Hughes9777ba22013-01-17 09:04:19 -08001733 size_t byte_count = code_item->insns_size_in_code_units_ * 2;
1734 const uint8_t* begin = reinterpret_cast<const uint8_t*>(code_item->insns_);
1735 const uint8_t* end = begin + byte_count;
1736 for (const uint8_t* p = begin; p != end; ++p) {
Ian Rogersc0542af2014-09-03 16:16:56 -07001737 bytecodes->push_back(*p);
Elliott Hughes9777ba22013-01-17 09:04:19 -08001738 }
1739 return JDWP::ERR_NONE;
1740}
1741
Elliott Hughes88d63092013-01-09 09:55:54 -08001742JDWP::JdwpTag Dbg::GetFieldBasicTag(JDWP::FieldId field_id) {
Mathieu Chartier61c5ebc2014-06-05 17:42:53 -07001743 return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001744}
1745
Elliott Hughes88d63092013-01-09 09:55:54 -08001746JDWP::JdwpTag Dbg::GetStaticFieldBasicTag(JDWP::FieldId field_id) {
Mathieu Chartier61c5ebc2014-06-05 17:42:53 -07001747 return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001748}
1749
Elliott Hughes88d63092013-01-09 09:55:54 -08001750static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::ObjectId object_id,
1751 JDWP::FieldId field_id, JDWP::ExpandBuf* pReply,
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001752 bool is_static)
Ian Rogersb726dcb2012-09-05 08:57:23 -07001753 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Ian Rogersc0542af2014-09-03 16:16:56 -07001754 JDWP::JdwpError error;
1755 mirror::Class* c = DecodeClass(ref_type_id, &error);
1756 if (ref_type_id != 0 && c == nullptr) {
1757 return error;
Elliott Hughes0cf74332012-02-23 23:14:00 -08001758 }
1759
Ian Rogersc0542af2014-09-03 16:16:56 -07001760 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
1761 if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) {
Elliott Hughes3f4d58f2012-02-18 20:05:37 -08001762 return JDWP::ERR_INVALID_OBJECT;
1763 }
Brian Carlstromea46f952013-07-30 01:26:50 -07001764 mirror::ArtField* f = FromFieldId(field_id);
Elliott Hughes0cf74332012-02-23 23:14:00 -08001765
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001766 mirror::Class* receiver_class = c;
Ian Rogersc0542af2014-09-03 16:16:56 -07001767 if (receiver_class == nullptr && o != nullptr) {
Elliott Hughes0cf74332012-02-23 23:14:00 -08001768 receiver_class = o->GetClass();
1769 }
Ian Rogersc0542af2014-09-03 16:16:56 -07001770 // TODO: should we give up now if receiver_class is nullptr?
1771 if (receiver_class != nullptr && !f->GetDeclaringClass()->IsAssignableFrom(receiver_class)) {
Elliott Hughes0cf74332012-02-23 23:14:00 -08001772 LOG(INFO) << "ERR_INVALID_FIELDID: " << PrettyField(f) << " " << PrettyClass(receiver_class);
Elliott Hughes3f4d58f2012-02-18 20:05:37 -08001773 return JDWP::ERR_INVALID_FIELDID;
1774 }
Elliott Hughesaed4be92011-12-02 16:16:23 -08001775
Elliott Hughes0cf74332012-02-23 23:14:00 -08001776 // The RI only enforces the static/non-static mismatch in one direction.
1777 // TODO: should we change the tests and check both?
1778 if (is_static) {
1779 if (!f->IsStatic()) {
1780 return JDWP::ERR_INVALID_FIELDID;
1781 }
1782 } else {
1783 if (f->IsStatic()) {
Ian Rogersc0542af2014-09-03 16:16:56 -07001784      LOG(WARNING) << "Ignoring non-nullptr receiver for ObjectReference.GetValues on static field "
1785 << PrettyField(f);
Elliott Hughes0cf74332012-02-23 23:14:00 -08001786 }
1787 }
jeffhao0dfbb7e2012-11-28 15:26:03 -08001788 if (f->IsStatic()) {
1789 o = f->GetDeclaringClass();
1790 }
Elliott Hughes0cf74332012-02-23 23:14:00 -08001791
Mathieu Chartier61c5ebc2014-06-05 17:42:53 -07001792 JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
Jeff Hao579b0242013-11-18 13:16:49 -08001793 JValue field_value;
1794 if (tag == JDWP::JT_VOID) {
1795 LOG(FATAL) << "Unknown tag: " << tag;
1796 } else if (!IsPrimitiveTag(tag)) {
1797 field_value.SetL(f->GetObject(o));
1798 } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
1799 field_value.SetJ(f->Get64(o));
Elliott Hughesaed4be92011-12-02 16:16:23 -08001800 } else {
Jeff Hao579b0242013-11-18 13:16:49 -08001801 field_value.SetI(f->Get32(o));
Elliott Hughesaed4be92011-12-02 16:16:23 -08001802 }
Jeff Hao579b0242013-11-18 13:16:49 -08001803 Dbg::OutputJValue(tag, &field_value, pReply);
1804
Elliott Hughes3f4d58f2012-02-18 20:05:37 -08001805 return JDWP::ERR_NONE;
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001806}
1807
Elliott Hughes88d63092013-01-09 09:55:54 -08001808JDWP::JdwpError Dbg::GetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001809 JDWP::ExpandBuf* pReply) {
Elliott Hughes88d63092013-01-09 09:55:54 -08001810 return GetFieldValueImpl(0, object_id, field_id, pReply, false);
Elliott Hughes3f4d58f2012-02-18 20:05:37 -08001811}
1812
Ian Rogersc0542af2014-09-03 16:16:56 -07001813JDWP::JdwpError Dbg::GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::FieldId field_id,
1814 JDWP::ExpandBuf* pReply) {
Elliott Hughes88d63092013-01-09 09:55:54 -08001815 return GetFieldValueImpl(ref_type_id, 0, field_id, pReply, true);
Elliott Hughes3f4d58f2012-02-18 20:05:37 -08001816}
1817
Elliott Hughes88d63092013-01-09 09:55:54 -08001818static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id,
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001819 uint64_t value, int width, bool is_static)
Ian Rogersb726dcb2012-09-05 08:57:23 -07001820 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Ian Rogersc0542af2014-09-03 16:16:56 -07001821 JDWP::JdwpError error;
1822 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
1823 if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) {
Elliott Hughes3f4d58f2012-02-18 20:05:37 -08001824 return JDWP::ERR_INVALID_OBJECT;
1825 }
Brian Carlstromea46f952013-07-30 01:26:50 -07001826 mirror::ArtField* f = FromFieldId(field_id);
Elliott Hughes0cf74332012-02-23 23:14:00 -08001827
1828 // The RI only enforces the static/non-static mismatch in one direction.
1829 // TODO: should we change the tests and check both?
1830 if (is_static) {
1831 if (!f->IsStatic()) {
1832 return JDWP::ERR_INVALID_FIELDID;
1833 }
1834 } else {
1835 if (f->IsStatic()) {
Ian Rogersc0542af2014-09-03 16:16:56 -07001836 LOG(WARNING) << "Ignoring non-nullptr receiver for ObjectReference.SetValues on static field " << PrettyField(f);
Elliott Hughes0cf74332012-02-23 23:14:00 -08001837 }
Elliott Hughes3f4d58f2012-02-18 20:05:37 -08001838 }
jeffhao0dfbb7e2012-11-28 15:26:03 -08001839 if (f->IsStatic()) {
1840 o = f->GetDeclaringClass();
1841 }
Elliott Hughesaed4be92011-12-02 16:16:23 -08001842
Mathieu Chartier61c5ebc2014-06-05 17:42:53 -07001843 JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
Elliott Hughesaed4be92011-12-02 16:16:23 -08001844
1845 if (IsPrimitiveTag(tag)) {
1846 if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
Elliott Hughes1bac54f2012-03-16 12:48:31 -07001847 CHECK_EQ(width, 8);
Sebastien Hertzd2fe10a2014-01-15 10:20:56 +01001848 // Debugging can't use transactional mode (runtime only).
1849 f->Set64<false>(o, value);
Elliott Hughesaed4be92011-12-02 16:16:23 -08001850 } else {
Elliott Hughes1bac54f2012-03-16 12:48:31 -07001851 CHECK_LE(width, 4);
Sebastien Hertzd2fe10a2014-01-15 10:20:56 +01001852 // Debugging can't use transactional mode (runtime only).
1853 f->Set32<false>(o, value);
Elliott Hughesaed4be92011-12-02 16:16:23 -08001854 }
1855 } else {
Ian Rogersc0542af2014-09-03 16:16:56 -07001856 mirror::Object* v = gRegistry->Get<mirror::Object*>(value, &error);
1857 if (error != JDWP::ERR_NONE) {
Elliott Hughes3d1ca6d2012-02-13 15:43:19 -08001858 return JDWP::ERR_INVALID_OBJECT;
1859 }
Ian Rogersc0542af2014-09-03 16:16:56 -07001860 if (v != nullptr) {
Mathieu Chartier61c5ebc2014-06-05 17:42:53 -07001861 mirror::Class* field_type;
1862 {
1863 StackHandleScope<3> hs(Thread::Current());
1864 HandleWrapper<mirror::Object> h_v(hs.NewHandleWrapper(&v));
1865 HandleWrapper<mirror::ArtField> h_f(hs.NewHandleWrapper(&f));
1866 HandleWrapper<mirror::Object> h_o(hs.NewHandleWrapper(&o));
1867 field_type = FieldHelper(h_f).GetType();
1868 }
Elliott Hughes3f4d58f2012-02-18 20:05:37 -08001869 if (!field_type->IsAssignableFrom(v->GetClass())) {
1870 return JDWP::ERR_INVALID_OBJECT;
1871 }
1872 }
Sebastien Hertzd2fe10a2014-01-15 10:20:56 +01001873 // Debugging can't use transactional mode (runtime only).
1874 f->SetObject<false>(o, v);
Elliott Hughesaed4be92011-12-02 16:16:23 -08001875 }
Elliott Hughes3d1ca6d2012-02-13 15:43:19 -08001876
1877 return JDWP::ERR_NONE;
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001878}
1879
Elliott Hughes88d63092013-01-09 09:55:54 -08001880JDWP::JdwpError Dbg::SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, uint64_t value,
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001881 int width) {
Elliott Hughes88d63092013-01-09 09:55:54 -08001882 return SetFieldValueImpl(object_id, field_id, value, width, false);
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001883}
1884
Elliott Hughes88d63092013-01-09 09:55:54 -08001885JDWP::JdwpError Dbg::SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width) {
1886 return SetFieldValueImpl(0, field_id, value, width, true);
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001887}
1888
Elliott Hughes88d63092013-01-09 09:55:54 -08001889std::string Dbg::StringToUtf8(JDWP::ObjectId string_id) {
Ian Rogersc0542af2014-09-03 16:16:56 -07001890 JDWP::JdwpError error;
1891 mirror::String* s = gRegistry->Get<mirror::String*>(string_id, &error);
1892 CHECK(s != nullptr) << error;
Elliott Hughes68fdbd02011-11-29 19:22:47 -08001893 return s->ToModifiedUtf8();
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001894}
1895
Jeff Hao579b0242013-11-18 13:16:49 -08001896void Dbg::OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply) {
1897 if (IsPrimitiveTag(tag)) {
1898 expandBufAdd1(pReply, tag);
1899 if (tag == JDWP::JT_BOOLEAN || tag == JDWP::JT_BYTE) {
1900 expandBufAdd1(pReply, return_value->GetI());
1901 } else if (tag == JDWP::JT_CHAR || tag == JDWP::JT_SHORT) {
1902 expandBufAdd2BE(pReply, return_value->GetI());
1903 } else if (tag == JDWP::JT_FLOAT || tag == JDWP::JT_INT) {
1904 expandBufAdd4BE(pReply, return_value->GetI());
1905 } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
1906 expandBufAdd8BE(pReply, return_value->GetJ());
1907 } else {
1908 CHECK_EQ(tag, JDWP::JT_VOID);
1909 }
1910 } else {
Ian Rogers98379392014-02-24 16:53:16 -08001911 ScopedObjectAccessUnchecked soa(Thread::Current());
Jeff Hao579b0242013-11-18 13:16:49 -08001912 mirror::Object* value = return_value->GetL();
Ian Rogers98379392014-02-24 16:53:16 -08001913 expandBufAdd1(pReply, TagFromObject(soa, value));
Jeff Hao579b0242013-11-18 13:16:49 -08001914 expandBufAddObjectId(pReply, gRegistry->Add(value));
1915 }
1916}
1917
Ian Rogersc0542af2014-09-03 16:16:56 -07001918JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string* name) {
jeffhaoa77f0f62012-12-05 17:19:31 -08001919 ScopedObjectAccessUnchecked soa(Thread::Current());
1920 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Ian Rogersc0542af2014-09-03 16:16:56 -07001921 JDWP::JdwpError error;
1922 Thread* thread = DecodeThread(soa, thread_id, &error);
1923 UNUSED(thread);
Elliott Hughes221229c2013-01-08 18:17:50 -08001924 if (error != JDWP::ERR_NONE && error != JDWP::ERR_THREAD_NOT_ALIVE) {
1925 return error;
Elliott Hughesa2e54f62011-11-17 13:01:30 -08001926 }
Elliott Hughes221229c2013-01-08 18:17:50 -08001927
1928 // We still need to report the zombie threads' names, so we can't just call Thread::GetThreadName.
Ian Rogersc0542af2014-09-03 16:16:56 -07001929 mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id, &error);
1930 CHECK(thread_object != nullptr) << error;
Brian Carlstromea46f952013-07-30 01:26:50 -07001931 mirror::ArtField* java_lang_Thread_name_field =
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001932 soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
1933 mirror::String* s =
1934 reinterpret_cast<mirror::String*>(java_lang_Thread_name_field->GetObject(thread_object));
Ian Rogersc0542af2014-09-03 16:16:56 -07001935 if (s != nullptr) {
1936 *name = s->ToModifiedUtf8();
Elliott Hughes221229c2013-01-08 18:17:50 -08001937 }
1938 return JDWP::ERR_NONE;
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001939}
1940
Elliott Hughes221229c2013-01-08 18:17:50 -08001941JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001942 ScopedObjectAccess soa(Thread::Current());
Ian Rogersc0542af2014-09-03 16:16:56 -07001943 JDWP::JdwpError error;
1944 mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id, &error);
1945 if (error != JDWP::ERR_NONE) {
Elliott Hughes2435a572012-02-17 16:07:41 -08001946 return JDWP::ERR_INVALID_OBJECT;
1947 }
Ian Rogers98379392014-02-24 16:53:16 -08001948 const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroup");
Elliott Hughes2435a572012-02-17 16:07:41 -08001949 // Okay, so it's an object, but is it actually a thread?
Hiroshi Yamauchib5a9e3d2014-06-09 12:11:20 -07001950 {
1951 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Ian Rogersc0542af2014-09-03 16:16:56 -07001952 Thread* thread = DecodeThread(soa, thread_id, &error);
1953 UNUSED(thread);
Hiroshi Yamauchib5a9e3d2014-06-09 12:11:20 -07001954 }
Elliott Hughes221229c2013-01-08 18:17:50 -08001955 if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
1956 // Zombie threads are in the null group.
1957 expandBufAddObjectId(pReply, JDWP::ObjectId(0));
Sebastien Hertz52d131d2014-03-13 16:17:40 +01001958 error = JDWP::ERR_NONE;
1959 } else if (error == JDWP::ERR_NONE) {
1960 mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
1961 CHECK(c != nullptr);
1962 mirror::ArtField* f = c->FindInstanceField("group", "Ljava/lang/ThreadGroup;");
Hiroshi Yamauchib5a9e3d2014-06-09 12:11:20 -07001963 CHECK(f != nullptr);
Sebastien Hertz52d131d2014-03-13 16:17:40 +01001964 mirror::Object* group = f->GetObject(thread_object);
Hiroshi Yamauchib5a9e3d2014-06-09 12:11:20 -07001965 CHECK(group != nullptr);
Sebastien Hertz52d131d2014-03-13 16:17:40 +01001966 JDWP::ObjectId thread_group_id = gRegistry->Add(group);
1967 expandBufAddObjectId(pReply, thread_group_id);
Elliott Hughes221229c2013-01-08 18:17:50 -08001968 }
Ian Rogers98379392014-02-24 16:53:16 -08001969 soa.Self()->EndAssertNoThreadSuspension(old_cause);
Sebastien Hertz52d131d2014-03-13 16:17:40 +01001970 return error;
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001971}
1972
Elliott Hughes88d63092013-01-09 09:55:54 -08001973std::string Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001974 ScopedObjectAccess soa(Thread::Current());
Ian Rogersc0542af2014-09-03 16:16:56 -07001975 JDWP::JdwpError error;
1976 mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id, &error);
1977 CHECK(thread_group != nullptr) << error;
Ian Rogers98379392014-02-24 16:53:16 -08001978 const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupName");
1979 mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
1980 CHECK(c != nullptr);
Brian Carlstromea46f952013-07-30 01:26:50 -07001981 mirror::ArtField* f = c->FindInstanceField("name", "Ljava/lang/String;");
Ian Rogersc0542af2014-09-03 16:16:56 -07001982 CHECK(f != nullptr);
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001983 mirror::String* s = reinterpret_cast<mirror::String*>(f->GetObject(thread_group));
Ian Rogers98379392014-02-24 16:53:16 -08001984 soa.Self()->EndAssertNoThreadSuspension(old_cause);
Elliott Hughes499c5132011-11-17 14:55:11 -08001985 return s->ToModifiedUtf8();
Elliott Hughes872d4ec2011-10-21 17:07:15 -07001986}
1987
Elliott Hughes88d63092013-01-09 09:55:54 -08001988JDWP::ObjectId Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id) {
Ian Rogers98379392014-02-24 16:53:16 -08001989 ScopedObjectAccessUnchecked soa(Thread::Current());
Ian Rogersc0542af2014-09-03 16:16:56 -07001990 JDWP::JdwpError error;
1991 mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id, &error);
1992 CHECK(thread_group != nullptr) << error;
Ian Rogers98379392014-02-24 16:53:16 -08001993 const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupParent");
1994 mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
1995 CHECK(c != nullptr);
Brian Carlstromea46f952013-07-30 01:26:50 -07001996 mirror::ArtField* f = c->FindInstanceField("parent", "Ljava/lang/ThreadGroup;");
Ian Rogersc0542af2014-09-03 16:16:56 -07001997 CHECK(f != nullptr);
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001998 mirror::Object* parent = f->GetObject(thread_group);
Ian Rogers98379392014-02-24 16:53:16 -08001999 soa.Self()->EndAssertNoThreadSuspension(old_cause);
Elliott Hughes4e235312011-12-02 11:34:15 -08002000 return gRegistry->Add(parent);
Elliott Hughes872d4ec2011-10-21 17:07:15 -07002001}
2002
2003JDWP::ObjectId Dbg::GetSystemThreadGroupId() {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002004 ScopedObjectAccessUnchecked soa(Thread::Current());
Brian Carlstromea46f952013-07-30 01:26:50 -07002005 mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup);
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08002006 mirror::Object* group = f->GetObject(f->GetDeclaringClass());
Ian Rogers365c1022012-06-22 15:05:28 -07002007 return gRegistry->Add(group);
Elliott Hughes872d4ec2011-10-21 17:07:15 -07002008}
2009
2010JDWP::ObjectId Dbg::GetMainThreadGroupId() {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002011 ScopedObjectAccess soa(Thread::Current());
Brian Carlstromea46f952013-07-30 01:26:50 -07002012 mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup);
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08002013 mirror::Object* group = f->GetObject(f->GetDeclaringClass());
Ian Rogers365c1022012-06-22 15:05:28 -07002014 return gRegistry->Add(group);
Elliott Hughes872d4ec2011-10-21 17:07:15 -07002015}
2016
Jeff Hao920af3e2013-08-28 15:46:38 -07002017JDWP::JdwpThreadStatus Dbg::ToJdwpThreadStatus(ThreadState state) {
2018 switch (state) {
2019 case kBlocked:
2020 return JDWP::TS_MONITOR;
2021 case kNative:
2022 case kRunnable:
2023 case kSuspended:
2024 return JDWP::TS_RUNNING;
2025 case kSleeping:
2026 return JDWP::TS_SLEEPING;
2027 case kStarting:
2028 case kTerminated:
2029 return JDWP::TS_ZOMBIE;
2030 case kTimedWaiting:
Sebastien Hertzbae182c2013-12-17 10:42:03 +01002031 case kWaitingForCheckPointsToRun:
Jeff Hao920af3e2013-08-28 15:46:38 -07002032 case kWaitingForDebuggerSend:
2033 case kWaitingForDebuggerSuspension:
2034 case kWaitingForDebuggerToAttach:
Sebastien Hertz138dbfc2013-12-04 18:15:25 +01002035 case kWaitingForDeoptimization:
Jeff Hao920af3e2013-08-28 15:46:38 -07002036 case kWaitingForGcToComplete:
Jeff Hao920af3e2013-08-28 15:46:38 -07002037 case kWaitingForJniOnLoad:
Sebastien Hertzbae182c2013-12-17 10:42:03 +01002038 case kWaitingForMethodTracingStart:
Jeff Hao920af3e2013-08-28 15:46:38 -07002039 case kWaitingForSignalCatcherOutput:
2040 case kWaitingInMainDebuggerLoop:
2041 case kWaitingInMainSignalCatcherLoop:
2042 case kWaitingPerformingGc:
2043 case kWaiting:
2044 return JDWP::TS_WAIT;
2045 // Don't add a 'default' here so the compiler can spot incompatible enum changes.
2046 }
2047 LOG(FATAL) << "Unknown thread state: " << state;
2048 return JDWP::TS_ZOMBIE;
2049}
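// Note: all of the runtime-internal kWaitingFor* states above collapse into TS_WAIT. JDWP
// defines only a handful of thread statuses, so the finer-grained runtime states are not
// visible to the debugger.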
2050
Sebastien Hertz52d131d2014-03-13 16:17:40 +01002051JDWP::JdwpError Dbg::GetThreadStatus(JDWP::ObjectId thread_id, JDWP::JdwpThreadStatus* pThreadStatus,
2052 JDWP::JdwpSuspendStatus* pSuspendStatus) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002053 ScopedObjectAccess soa(Thread::Current());
Elliott Hughes499c5132011-11-17 14:55:11 -08002054
Elliott Hughes9e0c1752013-01-09 14:02:58 -08002055 *pSuspendStatus = JDWP::SUSPEND_STATUS_NOT_SUSPENDED;
2056
Ian Rogers50b35e22012-10-04 10:09:15 -07002057 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Ian Rogersc0542af2014-09-03 16:16:56 -07002058 JDWP::JdwpError error;
2059 Thread* thread = DecodeThread(soa, thread_id, &error);
Elliott Hughes221229c2013-01-08 18:17:50 -08002060 if (error != JDWP::ERR_NONE) {
2061 if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
2062 *pThreadStatus = JDWP::TS_ZOMBIE;
Elliott Hughes221229c2013-01-08 18:17:50 -08002063 return JDWP::ERR_NONE;
2064 }
2065 return error;
Elliott Hughes499c5132011-11-17 14:55:11 -08002066 }
2067
Elliott Hughes9e0c1752013-01-09 14:02:58 -08002068 if (IsSuspendedForDebugger(soa, thread)) {
2069 *pSuspendStatus = JDWP::SUSPEND_STATUS_SUSPENDED;
Elliott Hughes499c5132011-11-17 14:55:11 -08002070 }
2071
Jeff Hao920af3e2013-08-28 15:46:38 -07002072 *pThreadStatus = ToJdwpThreadStatus(thread->GetState());
Elliott Hughes221229c2013-01-08 18:17:50 -08002073 return JDWP::ERR_NONE;
Elliott Hughes872d4ec2011-10-21 17:07:15 -07002074}
2075
Elliott Hughes221229c2013-01-08 18:17:50 -08002076JDWP::JdwpError Dbg::GetThreadDebugSuspendCount(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002077 ScopedObjectAccess soa(Thread::Current());
Ian Rogers50b35e22012-10-04 10:09:15 -07002078 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Ian Rogersc0542af2014-09-03 16:16:56 -07002079 JDWP::JdwpError error;
2080 Thread* thread = DecodeThread(soa, thread_id, &error);
Elliott Hughes221229c2013-01-08 18:17:50 -08002081 if (error != JDWP::ERR_NONE) {
2082 return error;
Elliott Hughes2435a572012-02-17 16:07:41 -08002083 }
Ian Rogers50b35e22012-10-04 10:09:15 -07002084 MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002085 expandBufAdd4BE(pReply, thread->GetDebugSuspendCount());
Elliott Hughes2435a572012-02-17 16:07:41 -08002086 return JDWP::ERR_NONE;
Elliott Hughes872d4ec2011-10-21 17:07:15 -07002087}
2088
Elliott Hughesf9501702013-01-11 11:22:27 -08002089JDWP::JdwpError Dbg::Interrupt(JDWP::ObjectId thread_id) {
2090 ScopedObjectAccess soa(Thread::Current());
2091 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Ian Rogersc0542af2014-09-03 16:16:56 -07002092 JDWP::JdwpError error;
2093 Thread* thread = DecodeThread(soa, thread_id, &error);
Elliott Hughesf9501702013-01-11 11:22:27 -08002094 if (error != JDWP::ERR_NONE) {
2095 return error;
2096 }
Ian Rogersdd7624d2014-03-14 17:43:00 -07002097 thread->Interrupt(soa.Self());
Elliott Hughesf9501702013-01-11 11:22:27 -08002098 return JDWP::ERR_NONE;
2099}
2100
Sebastien Hertz070f7322014-09-09 12:08:49 +02002101static bool IsInDesiredThreadGroup(ScopedObjectAccessUnchecked& soa,
2102 mirror::Object* desired_thread_group, mirror::Object* peer)
2103 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2104 // Do we want threads from all thread groups?
2105 if (desired_thread_group == nullptr) {
2106 return true;
2107 }
2108 mirror::ArtField* thread_group_field = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
2109 DCHECK(thread_group_field != nullptr);
2110 mirror::Object* group = thread_group_field->GetObject(peer);
2111 return (group == desired_thread_group);
2112}
2113
Ian Rogersc0542af2014-09-03 16:16:56 -07002114void Dbg::GetThreads(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>* thread_ids) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002115 ScopedObjectAccessUnchecked soa(Thread::Current());
Ian Rogersc0542af2014-09-03 16:16:56 -07002116 JDWP::JdwpError error;
2117 mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id, &error);
2118 CHECK_EQ(error, JDWP::ERR_NONE);
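  // Snapshot the thread list under the lock, then walk the snapshot: every live thread with a
  // Java peer that belongs to the requested group (or to any group when thread_group is null)
  // is registered and reported to the debugger.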
Sebastien Hertz070f7322014-09-09 12:08:49 +02002119 std::list<Thread*> all_threads_list;
2120 {
2121 MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
2122 all_threads_list = Runtime::Current()->GetThreadList()->GetList();
2123 }
2124 for (Thread* t : all_threads_list) {
2125 if (t == Dbg::GetDebugThread()) {
2126 // Skip the JDWP thread. Some debuggers get bent out of shape when they can't suspend and
2127 // query all threads, so it's easier if we just don't tell them about this thread.
2128 continue;
2129 }
2130 if (t->IsStillStarting()) {
2131 // This thread is being started (and has been registered in the thread list). However, it is
2132 // not completely started yet so we must ignore it.
2133 continue;
2134 }
2135 mirror::Object* peer = t->GetPeer();
2136 if (peer == nullptr) {
2137    // The peer might be null if the thread is still starting up. We can't tell the debugger about
2138 // this thread yet.
2139 // TODO: if we identified threads to the debugger by their Thread*
2140 // rather than their peer's mirror::Object*, we could fix this.
2141 // Doing so might help us report ZOMBIE threads too.
2142 continue;
2143 }
2144 if (IsInDesiredThreadGroup(soa, thread_group, peer)) {
2145 thread_ids->push_back(gRegistry->Add(peer));
2146 }
2147 }
Elliott Hughescaf76542012-06-28 16:08:22 -07002148}
Elliott Hughesa2155262011-11-16 16:26:58 -08002149
Ian Rogersc0542af2014-09-03 16:16:56 -07002150void Dbg::GetChildThreadGroups(JDWP::ObjectId thread_group_id,
2151 std::vector<JDWP::ObjectId>* child_thread_group_ids) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002152 ScopedObjectAccess soa(Thread::Current());
Ian Rogersc0542af2014-09-03 16:16:56 -07002153 JDWP::JdwpError error;
2154 mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id, &error);
2155 CHECK(thread_group != nullptr) << error;
Elliott Hughescaf76542012-06-28 16:08:22 -07002156
2157 // Get the ArrayList<ThreadGroup> "groups" out of this thread group...
Brian Carlstromea46f952013-07-30 01:26:50 -07002158 mirror::ArtField* groups_field = thread_group->GetClass()->FindInstanceField("groups", "Ljava/util/List;");
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08002159 mirror::Object* groups_array_list = groups_field->GetObject(thread_group);
Elliott Hughescaf76542012-06-28 16:08:22 -07002160
2161 // Get the array and size out of the ArrayList<ThreadGroup>...
Brian Carlstromea46f952013-07-30 01:26:50 -07002162 mirror::ArtField* array_field = groups_array_list->GetClass()->FindInstanceField("array", "[Ljava/lang/Object;");
2163 mirror::ArtField* size_field = groups_array_list->GetClass()->FindInstanceField("size", "I");
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08002164 mirror::ObjectArray<mirror::Object>* groups_array =
2165 array_field->GetObject(groups_array_list)->AsObjectArray<mirror::Object>();
Elliott Hughescaf76542012-06-28 16:08:22 -07002166 const int32_t size = size_field->GetInt(groups_array_list);
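  // Note: this relies on libcore's ArrayList implementation details (the "array" backing store
  // and the "size" field); if that field layout changes, this reflection-based walk breaks.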
2167
2168 // Copy the first 'size' elements out of the array into the result.
2169 for (int32_t i = 0; i < size; ++i) {
Ian Rogersc0542af2014-09-03 16:16:56 -07002170 child_thread_group_ids->push_back(gRegistry->Add(groups_array->Get(i)));
Elliott Hughesa2155262011-11-16 16:26:58 -08002171 }
2172}
2173
Ian Rogersc0542af2014-09-03 16:16:56 -07002174static int GetStackDepth(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Ian Rogers0399dde2012-06-06 17:09:28 -07002175 struct CountStackDepthVisitor : public StackVisitor {
Brian Carlstrom93ba8932013-07-17 21:31:49 -07002176 explicit CountStackDepthVisitor(Thread* thread)
Ian Rogersc0542af2014-09-03 16:16:56 -07002177 : StackVisitor(thread, nullptr), depth(0) {}
Ian Rogers0399dde2012-06-06 17:09:28 -07002178
Elliott Hughes64f574f2013-02-20 14:57:12 -08002179 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2180 // annotalysis.
2181 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
Ian Rogers0399dde2012-06-06 17:09:28 -07002182 if (!GetMethod()->IsRuntimeMethod()) {
Elliott Hughesf8a2df72011-12-01 12:19:54 -08002183 ++depth;
2184 }
Elliott Hughes530fa002012-03-12 11:44:49 -07002185 return true;
Elliott Hughesa2e54f62011-11-17 13:01:30 -08002186 }
2187 size_t depth;
2188 };
Elliott Hughes08fc03a2012-06-26 17:34:00 -07002189
Ian Rogers7a22fa62013-01-23 12:16:16 -08002190 CountStackDepthVisitor visitor(thread);
Ian Rogers0399dde2012-06-06 17:09:28 -07002191 visitor.WalkStack();
Elliott Hughesa2e54f62011-11-17 13:01:30 -08002192 return visitor.depth;
Elliott Hughes872d4ec2011-10-21 17:07:15 -07002193}
2194
Ian Rogersc0542af2014-09-03 16:16:56 -07002195JDWP::JdwpError Dbg::GetThreadFrameCount(JDWP::ObjectId thread_id, size_t* result) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002196 ScopedObjectAccess soa(Thread::Current());
jeffhaoa77f0f62012-12-05 17:19:31 -08002197 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Ian Rogersc0542af2014-09-03 16:16:56 -07002198 JDWP::JdwpError error;
2199 *result = 0;
2200 Thread* thread = DecodeThread(soa, thread_id, &error);
Elliott Hughes221229c2013-01-08 18:17:50 -08002201 if (error != JDWP::ERR_NONE) {
2202 return error;
2203 }
Elliott Hughesf15f4a02013-01-09 10:09:38 -08002204 if (!IsSuspendedForDebugger(soa, thread)) {
2205 return JDWP::ERR_THREAD_NOT_SUSPENDED;
2206 }
Ian Rogersc0542af2014-09-03 16:16:56 -07002207 *result = GetStackDepth(thread);
Elliott Hughes221229c2013-01-08 18:17:50 -08002208 return JDWP::ERR_NONE;
Elliott Hughes86964332012-02-15 19:37:42 -08002209}
2210
Ian Rogers306057f2012-11-26 12:45:53 -08002211JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame,
2212 size_t frame_count, JDWP::ExpandBuf* buf) {
Elliott Hughes6e9d22c2012-06-22 15:02:37 -07002213 class GetFrameVisitor : public StackVisitor {
2214 public:
Ian Rogers7a22fa62013-01-23 12:16:16 -08002215 GetFrameVisitor(Thread* thread, size_t start_frame, size_t frame_count, JDWP::ExpandBuf* buf)
Ian Rogersb726dcb2012-09-05 08:57:23 -07002216 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
Ian Rogersc0542af2014-09-03 16:16:56 -07002217 : StackVisitor(thread, nullptr), depth_(0),
Elliott Hughes6e9d22c2012-06-22 15:02:37 -07002218 start_frame_(start_frame), frame_count_(frame_count), buf_(buf) {
2219 expandBufAdd4BE(buf_, frame_count_);
Elliott Hughes03181a82011-11-17 17:22:21 -08002220 }
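    // The reply built here is the frame count (written in the constructor) followed by one
    // (frame id, location) pair per visited frame in [start_frame, start_frame + frame_count),
    // which is the shape a JDWP ThreadReference.Frames reply is expected to have.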
Ian Rogers0399dde2012-06-06 17:09:28 -07002221
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002222 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2223 // annotalysis.
2224 virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
Ian Rogers0399dde2012-06-06 17:09:28 -07002225 if (GetMethod()->IsRuntimeMethod()) {
Brian Carlstrom7934ac22013-07-26 10:54:15 -07002226 return true; // The debugger can't do anything useful with a frame that has no Method*.
Elliott Hughes03181a82011-11-17 17:22:21 -08002227 }
Elliott Hughes6e9d22c2012-06-22 15:02:37 -07002228 if (depth_ >= start_frame_ + frame_count_) {
Elliott Hughes530fa002012-03-12 11:44:49 -07002229 return false;
Elliott Hughes03181a82011-11-17 17:22:21 -08002230 }
Elliott Hughes6e9d22c2012-06-22 15:02:37 -07002231 if (depth_ >= start_frame_) {
2232 JDWP::FrameId frame_id(GetFrameId());
2233 JDWP::JdwpLocation location;
Ian Rogersc0542af2014-09-03 16:16:56 -07002234 SetLocation(&location, GetMethod(), GetDexPc());
Ian Rogersef7d42f2014-01-06 12:55:46 -08002235 VLOG(jdwp) << StringPrintf(" Frame %3zd: id=%3" PRIu64 " ", depth_, frame_id) << location;
Elliott Hughes6e9d22c2012-06-22 15:02:37 -07002236 expandBufAdd8BE(buf_, frame_id);
2237 expandBufAddLocation(buf_, location);
2238 }
2239 ++depth_;
Elliott Hughes530fa002012-03-12 11:44:49 -07002240 return true;
Elliott Hughes03181a82011-11-17 17:22:21 -08002241 }
Elliott Hughes6e9d22c2012-06-22 15:02:37 -07002242
2243 private:
2244 size_t depth_;
2245 const size_t start_frame_;
2246 const size_t frame_count_;
2247 JDWP::ExpandBuf* buf_;
Elliott Hughes03181a82011-11-17 17:22:21 -08002248 };
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002249
2250 ScopedObjectAccessUnchecked soa(Thread::Current());
jeffhaoa77f0f62012-12-05 17:19:31 -08002251 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Ian Rogersc0542af2014-09-03 16:16:56 -07002252 JDWP::JdwpError error;
2253 Thread* thread = DecodeThread(soa, thread_id, &error);
Elliott Hughes221229c2013-01-08 18:17:50 -08002254 if (error != JDWP::ERR_NONE) {
2255 return error;
2256 }
Elliott Hughesf15f4a02013-01-09 10:09:38 -08002257 if (!IsSuspendedForDebugger(soa, thread)) {
2258 return JDWP::ERR_THREAD_NOT_SUSPENDED;
2259 }
Ian Rogers7a22fa62013-01-23 12:16:16 -08002260 GetFrameVisitor visitor(thread, start_frame, frame_count, buf);
Ian Rogers0399dde2012-06-06 17:09:28 -07002261 visitor.WalkStack();
Elliott Hughes6e9d22c2012-06-22 15:02:37 -07002262 return JDWP::ERR_NONE;
Elliott Hughes872d4ec2011-10-21 17:07:15 -07002263}
2264
2265JDWP::ObjectId Dbg::GetThreadSelfId() {
Mathieu Chartierdbe6f462012-09-25 16:54:50 -07002266 ScopedObjectAccessUnchecked soa(Thread::Current());
Ian Rogerscfaa4552012-11-26 21:00:08 -08002267 return gRegistry->Add(soa.Self()->GetPeer());
Elliott Hughes872d4ec2011-10-21 17:07:15 -07002268}
2269
Elliott Hughes475fc232011-10-25 15:00:35 -07002270void Dbg::SuspendVM() {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002271 Runtime::Current()->GetThreadList()->SuspendAllForDebugger();
Elliott Hughes872d4ec2011-10-21 17:07:15 -07002272}
2273
2274void Dbg::ResumeVM() {
Elliott Hughesc61a2672012-06-21 14:52:29 -07002275 Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
Elliott Hughes872d4ec2011-10-21 17:07:15 -07002276}
2277
Elliott Hughes221229c2013-01-08 18:17:50 -08002278JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId thread_id, bool request_suspension) {
Ian Rogersf3d874c2014-07-17 18:52:42 -07002279 Thread* self = Thread::Current();
Ian Rogersc0542af2014-09-03 16:16:56 -07002280 ScopedLocalRef<jobject> peer(self->GetJniEnv(), nullptr);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002281 {
Ian Rogersf3d874c2014-07-17 18:52:42 -07002282 ScopedObjectAccess soa(self);
Ian Rogersc0542af2014-09-03 16:16:56 -07002283 JDWP::JdwpError error;
2284 peer.reset(soa.AddLocalReference<jobject>(gRegistry->Get<mirror::Object*>(thread_id, &error)));
Elliott Hughes4e235312011-12-02 11:34:15 -08002285 }
Ian Rogersc0542af2014-09-03 16:16:56 -07002286 if (peer.get() == nullptr) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002287 return JDWP::ERR_THREAD_NOT_ALIVE;
2288 }
Ian Rogersf3d874c2014-07-17 18:52:42 -07002289 // Suspend thread to build stack trace. Take suspend thread lock to avoid races with threads
2290 // trying to suspend this one.
2291 MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
Elliott Hughesf327e072013-01-09 16:01:26 -08002292 bool timed_out;
Brian Carlstromba32de42014-08-27 23:43:46 -07002293 ThreadList* thread_list = Runtime::Current()->GetThreadList();
2294 Thread* thread = thread_list->SuspendThreadByPeer(peer.get(), request_suspension, true,
2295 &timed_out);
Ian Rogersc0542af2014-09-03 16:16:56 -07002296 if (thread != nullptr) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002297 return JDWP::ERR_NONE;
Elliott Hughesf327e072013-01-09 16:01:26 -08002298 } else if (timed_out) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002299 return JDWP::ERR_INTERNAL;
2300 } else {
2301 return JDWP::ERR_THREAD_NOT_ALIVE;
2302 }
Elliott Hughes872d4ec2011-10-21 17:07:15 -07002303}
2304
Elliott Hughes221229c2013-01-08 18:17:50 -08002305void Dbg::ResumeThread(JDWP::ObjectId thread_id) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002306 ScopedObjectAccessUnchecked soa(Thread::Current());
Ian Rogersc0542af2014-09-03 16:16:56 -07002307 JDWP::JdwpError error;
2308 mirror::Object* peer = gRegistry->Get<mirror::Object*>(thread_id, &error);
2309 CHECK(peer != nullptr) << error;
jeffhaoa77f0f62012-12-05 17:19:31 -08002310 Thread* thread;
2311 {
2312 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2313 thread = Thread::FromManagedThread(soa, peer);
2314 }
Ian Rogersc0542af2014-09-03 16:16:56 -07002315 if (thread == nullptr) {
Elliott Hughes4e235312011-12-02 11:34:15 -08002316 LOG(WARNING) << "No such thread for resume: " << peer;
2317 return;
2318 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002319 bool needs_resume;
2320 {
Ian Rogers50b35e22012-10-04 10:09:15 -07002321 MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002322 needs_resume = thread->GetSuspendCount() > 0;
2323 }
2324 if (needs_resume) {
Elliott Hughes546b9862012-06-20 16:06:13 -07002325 Runtime::Current()->GetThreadList()->Resume(thread, true);
2326 }
Elliott Hughes872d4ec2011-10-21 17:07:15 -07002327}
2328
2329void Dbg::SuspendSelf() {
Elliott Hughes475fc232011-10-25 15:00:35 -07002330 Runtime::Current()->GetThreadList()->SuspendSelfForDebugger();
Elliott Hughes872d4ec2011-10-21 17:07:15 -07002331}
2332
Ian Rogers0399dde2012-06-06 17:09:28 -07002333struct GetThisVisitor : public StackVisitor {
Ian Rogers7a22fa62013-01-23 12:16:16 -08002334 GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
Ian Rogersb726dcb2012-09-05 08:57:23 -07002335 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
Ian Rogersc0542af2014-09-03 16:16:56 -07002336 : StackVisitor(thread, context), this_object(nullptr), frame_id(frame_id) {}
Ian Rogers0399dde2012-06-06 17:09:28 -07002337
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002338 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2339 // annotalysis.
2340 virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
Elliott Hughes6e9d22c2012-06-22 15:02:37 -07002341 if (frame_id != GetFrameId()) {
Ian Rogers0399dde2012-06-06 17:09:28 -07002342 return true; // continue
Ian Rogers0399dde2012-06-06 17:09:28 -07002343 } else {
Ian Rogers62d6c772013-02-27 08:32:07 -08002344 this_object = GetThisObject();
2345 return false;
Ian Rogers0399dde2012-06-06 17:09:28 -07002346 }
Elliott Hughes86b00102011-12-05 17:54:26 -08002347 }
Ian Rogers0399dde2012-06-06 17:09:28 -07002348
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08002349 mirror::Object* this_object;
Elliott Hughes6e9d22c2012-06-22 15:02:37 -07002350 JDWP::FrameId frame_id;
Ian Rogers0399dde2012-06-06 17:09:28 -07002351};
2352
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002353JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id,
2354 JDWP::ObjectId* result) {
2355 ScopedObjectAccessUnchecked soa(Thread::Current());
2356 Thread* thread;
2357 {
Ian Rogers50b35e22012-10-04 10:09:15 -07002358 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Ian Rogersc0542af2014-09-03 16:16:56 -07002359 JDWP::JdwpError error;
2360 thread = DecodeThread(soa, thread_id, &error);
Elliott Hughes221229c2013-01-08 18:17:50 -08002361 if (error != JDWP::ERR_NONE) {
2362 return error;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002363 }
Elliott Hughes9e0c1752013-01-09 14:02:58 -08002364 if (!IsSuspendedForDebugger(soa, thread)) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002365 return JDWP::ERR_THREAD_NOT_SUSPENDED;
2366 }
Elliott Hughes6e9d22c2012-06-22 15:02:37 -07002367 }
Ian Rogers700a4022014-05-19 16:49:03 -07002368 std::unique_ptr<Context> context(Context::Create());
Ian Rogers7a22fa62013-01-23 12:16:16 -08002369 GetThisVisitor visitor(thread, context.get(), frame_id);
Ian Rogers0399dde2012-06-06 17:09:28 -07002370 visitor.WalkStack();
Elliott Hughes6e9d22c2012-06-22 15:02:37 -07002371 *result = gRegistry->Add(visitor.this_object);
2372 return JDWP::ERR_NONE;
Elliott Hughes872d4ec2011-10-21 17:07:15 -07002373}
2374
Sebastien Hertzcb19ebf2014-03-11 15:26:35 +01002375JDWP::JdwpError Dbg::GetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot,
2376 JDWP::JdwpTag tag, uint8_t* buf, size_t width) {
Ian Rogers0399dde2012-06-06 17:09:28 -07002377 struct GetLocalVisitor : public StackVisitor {
Ian Rogers98379392014-02-24 16:53:16 -08002378 GetLocalVisitor(const ScopedObjectAccessUnchecked& soa, Thread* thread, Context* context,
2379 JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width)
Ian Rogersb726dcb2012-09-05 08:57:23 -07002380 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
Ian Rogers98379392014-02-24 16:53:16 -08002381 : StackVisitor(thread, context), soa_(soa), frame_id_(frame_id), slot_(slot), tag_(tag),
Sebastien Hertzcb19ebf2014-03-11 15:26:35 +01002382 buf_(buf), width_(width), error_(JDWP::ERR_NONE) {}
Ian Rogersca190662012-06-26 15:45:57 -07002383
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002384 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2385 // annotalysis.
2386 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
Ian Rogers0399dde2012-06-06 17:09:28 -07002387 if (GetFrameId() != frame_id_) {
2388 return true; // Not our frame, carry on.
Elliott Hughesdbb40792011-11-18 17:05:22 -08002389 }
Ian Rogers0399dde2012-06-06 17:09:28 -07002390 // TODO: check that the tag is compatible with the actual type of the slot!
Sebastien Hertzcb19ebf2014-03-11 15:26:35 +01002391 // TODO: check slot is valid for this method or return INVALID_SLOT error.
Brian Carlstromea46f952013-07-30 01:26:50 -07002392 mirror::ArtMethod* m = GetMethod();
Sebastien Hertzcb19ebf2014-03-11 15:26:35 +01002393 if (m->IsNative()) {
2394        // We can't read a local value from a native method.
2395 error_ = JDWP::ERR_OPAQUE_FRAME;
2396 return false;
2397 }
Ian Rogers0399dde2012-06-06 17:09:28 -07002398 uint16_t reg = DemangleSlot(slot_, m);
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002399 constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION;
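      // Each JDWP tag is fetched from the matching vreg kind below: boolean/byte/short/char/int
      // from kIntVReg, float from kFloatVReg, references from kReferenceVReg, and the 64-bit
      // tags from lo/hi vreg pairs. On success the value is written big-endian after the tag
      // byte; e.g. for JT_INT ('I') with value 0x1234, buf_ ends up as 'I' 00 00 12 34.
      // When the register has no location information, ERR_ABSENT_INFORMATION is reported.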
Ian Rogers0399dde2012-06-06 17:09:28 -07002400 switch (tag_) {
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002401 case JDWP::JT_BOOLEAN: {
Ian Rogers0399dde2012-06-06 17:09:28 -07002402 CHECK_EQ(width_, 1U);
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002403 uint32_t intVal;
2404 if (GetVReg(m, reg, kIntVReg, &intVal)) {
2405 VLOG(jdwp) << "get boolean local " << reg << " = " << intVal;
2406 JDWP::Set1(buf_+1, intVal != 0);
2407 } else {
2408 VLOG(jdwp) << "failed to get boolean local " << reg;
2409 error_ = kFailureErrorCode;
2410 }
2411 break;
Ian Rogers0399dde2012-06-06 17:09:28 -07002412 }
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002413 case JDWP::JT_BYTE: {
Ian Rogers0399dde2012-06-06 17:09:28 -07002414 CHECK_EQ(width_, 1U);
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002415 uint32_t intVal;
2416 if (GetVReg(m, reg, kIntVReg, &intVal)) {
2417 VLOG(jdwp) << "get byte local " << reg << " = " << intVal;
2418 JDWP::Set1(buf_+1, intVal);
2419 } else {
2420 VLOG(jdwp) << "failed to get byte local " << reg;
2421 error_ = kFailureErrorCode;
2422 }
2423 break;
Ian Rogers0399dde2012-06-06 17:09:28 -07002424 }
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002425 case JDWP::JT_SHORT:
2426 case JDWP::JT_CHAR: {
Ian Rogers0399dde2012-06-06 17:09:28 -07002427 CHECK_EQ(width_, 2U);
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002428 uint32_t intVal;
2429 if (GetVReg(m, reg, kIntVReg, &intVal)) {
2430 VLOG(jdwp) << "get short/char local " << reg << " = " << intVal;
2431 JDWP::Set2BE(buf_+1, intVal);
2432 } else {
2433 VLOG(jdwp) << "failed to get short/char local " << reg;
2434 error_ = kFailureErrorCode;
Sebastien Hertzaa9b3ae2014-06-13 14:49:27 +02002435 }
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002436 break;
Sebastien Hertzaa9b3ae2014-06-13 14:49:27 +02002437 }
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002438 case JDWP::JT_INT: {
2439 CHECK_EQ(width_, 4U);
2440 uint32_t intVal;
2441 if (GetVReg(m, reg, kIntVReg, &intVal)) {
2442 VLOG(jdwp) << "get int local " << reg << " = " << intVal;
2443 JDWP::Set4BE(buf_+1, intVal);
2444 } else {
2445 VLOG(jdwp) << "failed to get int local " << reg;
2446 error_ = kFailureErrorCode;
Sebastien Hertz8ebd94a2014-06-17 09:49:21 +00002447 }
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002448 break;
Sebastien Hertz8ebd94a2014-06-17 09:49:21 +00002449 }
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002450 case JDWP::JT_FLOAT: {
2451 CHECK_EQ(width_, 4U);
2452 uint32_t intVal;
2453 if (GetVReg(m, reg, kFloatVReg, &intVal)) {
2454 VLOG(jdwp) << "get float local " << reg << " = " << intVal;
2455 JDWP::Set4BE(buf_+1, intVal);
2456 } else {
2457 VLOG(jdwp) << "failed to get float local " << reg;
2458 error_ = kFailureErrorCode;
2459 }
2460 break;
2461 }
2462 case JDWP::JT_ARRAY:
2463 case JDWP::JT_CLASS_LOADER:
2464 case JDWP::JT_CLASS_OBJECT:
2465 case JDWP::JT_OBJECT:
2466 case JDWP::JT_STRING:
2467 case JDWP::JT_THREAD:
2468 case JDWP::JT_THREAD_GROUP: {
2469 CHECK_EQ(width_, sizeof(JDWP::ObjectId));
2470 uint32_t intVal;
2471 if (GetVReg(m, reg, kReferenceVReg, &intVal)) {
2472 mirror::Object* o = reinterpret_cast<mirror::Object*>(intVal);
2473 VLOG(jdwp) << "get " << tag_ << " object local " << reg << " = " << o;
2474 if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
2475 LOG(FATAL) << "Register " << reg << " expected to hold " << tag_ << " object: " << o;
2476 }
2477 tag_ = TagFromObject(soa_, o);
2478 JDWP::SetObjectId(buf_+1, gRegistry->Add(o));
2479 } else {
2480 VLOG(jdwp) << "failed to get " << tag_ << " object local " << reg;
2481 error_ = kFailureErrorCode;
2482 }
2483 break;
2484 }
2485 case JDWP::JT_DOUBLE: {
Ian Rogers2bcb4a42012-11-08 10:39:18 -08002486 CHECK_EQ(width_, 8U);
Sebastien Hertzc901dd72014-07-16 11:56:07 +02002487 uint64_t longVal;
2488 if (GetVRegPair(m, reg, kDoubleLoVReg, kDoubleHiVReg, &longVal)) {
2489 VLOG(jdwp) << "get double local " << reg << " = " << longVal;
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002490 JDWP::Set8BE(buf_+1, longVal);
2491 } else {
2492 VLOG(jdwp) << "failed to get double local " << reg;
2493 error_ = kFailureErrorCode;
2494 }
2495 break;
Ian Rogers2bcb4a42012-11-08 10:39:18 -08002496 }
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002497 case JDWP::JT_LONG: {
Ian Rogers0399dde2012-06-06 17:09:28 -07002498 CHECK_EQ(width_, 8U);
Sebastien Hertzc901dd72014-07-16 11:56:07 +02002499 uint64_t longVal;
2500 if (GetVRegPair(m, reg, kLongLoVReg, kLongHiVReg, &longVal)) {
2501 VLOG(jdwp) << "get long local " << reg << " = " << longVal;
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002502 JDWP::Set8BE(buf_+1, longVal);
2503 } else {
2504 VLOG(jdwp) << "failed to get long local " << reg;
2505 error_ = kFailureErrorCode;
2506 }
2507 break;
Ian Rogers0399dde2012-06-06 17:09:28 -07002508 }
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002509 default:
2510 LOG(FATAL) << "Unknown tag " << tag_;
2511 break;
Ian Rogers0399dde2012-06-06 17:09:28 -07002512 }
2513
2514 // Prepend tag, which may have been updated.
2515 JDWP::Set1(buf_, tag_);
2516 return false;
2517 }
Ian Rogers98379392014-02-24 16:53:16 -08002518 const ScopedObjectAccessUnchecked& soa_;
Ian Rogers0399dde2012-06-06 17:09:28 -07002519 const JDWP::FrameId frame_id_;
2520 const int slot_;
2521 JDWP::JdwpTag tag_;
2522 uint8_t* const buf_;
2523 const size_t width_;
Sebastien Hertzcb19ebf2014-03-11 15:26:35 +01002524 JDWP::JdwpError error_;
Ian Rogers0399dde2012-06-06 17:09:28 -07002525 };
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002526
2527 ScopedObjectAccessUnchecked soa(Thread::Current());
jeffhaoa77f0f62012-12-05 17:19:31 -08002528 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Ian Rogersc0542af2014-09-03 16:16:56 -07002529 JDWP::JdwpError error;
2530 Thread* thread = DecodeThread(soa, thread_id, &error);
Elliott Hughes221229c2013-01-08 18:17:50 -08002531 if (error != JDWP::ERR_NONE) {
Sebastien Hertzcb19ebf2014-03-11 15:26:35 +01002532 return error;
Elliott Hughes221229c2013-01-08 18:17:50 -08002533 }
Sebastien Hertzcb19ebf2014-03-11 15:26:35 +01002534  // TODO: check that the thread is suspended by the debugger?
Ian Rogers700a4022014-05-19 16:49:03 -07002535 std::unique_ptr<Context> context(Context::Create());
Ian Rogers98379392014-02-24 16:53:16 -08002536 GetLocalVisitor visitor(soa, thread, context.get(), frame_id, slot, tag, buf, width);
Ian Rogers0399dde2012-06-06 17:09:28 -07002537 visitor.WalkStack();
Sebastien Hertzcb19ebf2014-03-11 15:26:35 +01002538 return visitor.error_;
Elliott Hughes872d4ec2011-10-21 17:07:15 -07002539}
2540
Sebastien Hertzcb19ebf2014-03-11 15:26:35 +01002541JDWP::JdwpError Dbg::SetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot,
2542 JDWP::JdwpTag tag, uint64_t value, size_t width) {
Ian Rogers0399dde2012-06-06 17:09:28 -07002543 struct SetLocalVisitor : public StackVisitor {
Ian Rogers7a22fa62013-01-23 12:16:16 -08002544 SetLocalVisitor(Thread* thread, Context* context,
Ian Rogers0399dde2012-06-06 17:09:28 -07002545 JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag, uint64_t value,
Ian Rogersca190662012-06-26 15:45:57 -07002546 size_t width)
Ian Rogersb726dcb2012-09-05 08:57:23 -07002547 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
Ian Rogers7a22fa62013-01-23 12:16:16 -08002548 : StackVisitor(thread, context),
Sebastien Hertzcb19ebf2014-03-11 15:26:35 +01002549 frame_id_(frame_id), slot_(slot), tag_(tag), value_(value), width_(width),
2550 error_(JDWP::ERR_NONE) {}
Ian Rogersca190662012-06-26 15:45:57 -07002551
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002552 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2553 // annotalysis.
2554 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
Ian Rogers0399dde2012-06-06 17:09:28 -07002555 if (GetFrameId() != frame_id_) {
2556 return true; // Not our frame, carry on.
2557 }
2558 // TODO: check that the tag is compatible with the actual type of the slot!
Sebastien Hertzcb19ebf2014-03-11 15:26:35 +01002559 // TODO: check slot is valid for this method or return INVALID_SLOT error.
Brian Carlstromea46f952013-07-30 01:26:50 -07002560 mirror::ArtMethod* m = GetMethod();
Sebastien Hertzcb19ebf2014-03-11 15:26:35 +01002561 if (m->IsNative()) {
2562        // We can't write a local value into a native method.
2563 error_ = JDWP::ERR_OPAQUE_FRAME;
2564 return false;
2565 }
Ian Rogers0399dde2012-06-06 17:09:28 -07002566 uint16_t reg = DemangleSlot(slot_, m);
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002567 constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION;
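      // Mirror image of GetLocalValue: the debugger-supplied value is written back into the
      // frame with SetVReg/SetVRegPair using the vreg kind that matches the JDWP tag. Failure
      // to locate the register is reported as ERR_ABSENT_INFORMATION.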
Ian Rogers0399dde2012-06-06 17:09:28 -07002568 switch (tag_) {
2569 case JDWP::JT_BOOLEAN:
2570 case JDWP::JT_BYTE:
2571 CHECK_EQ(width_, 1U);
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002572 if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) {
2573 VLOG(jdwp) << "failed to set boolean/byte local " << reg << " = "
2574 << static_cast<uint32_t>(value_);
2575 error_ = kFailureErrorCode;
2576 }
Ian Rogers0399dde2012-06-06 17:09:28 -07002577 break;
2578 case JDWP::JT_SHORT:
2579 case JDWP::JT_CHAR:
2580 CHECK_EQ(width_, 2U);
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002581 if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) {
2582 VLOG(jdwp) << "failed to set short/char local " << reg << " = "
2583 << static_cast<uint32_t>(value_);
2584 error_ = kFailureErrorCode;
2585 }
Ian Rogers0399dde2012-06-06 17:09:28 -07002586 break;
2587 case JDWP::JT_INT:
Ian Rogers2bcb4a42012-11-08 10:39:18 -08002588 CHECK_EQ(width_, 4U);
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002589 if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) {
2590 VLOG(jdwp) << "failed to set int local " << reg << " = "
2591 << static_cast<uint32_t>(value_);
2592 error_ = kFailureErrorCode;
2593 }
Ian Rogers2bcb4a42012-11-08 10:39:18 -08002594 break;
Ian Rogers0399dde2012-06-06 17:09:28 -07002595 case JDWP::JT_FLOAT:
2596 CHECK_EQ(width_, 4U);
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002597 if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kFloatVReg)) {
2598 VLOG(jdwp) << "failed to set float local " << reg << " = "
2599 << static_cast<uint32_t>(value_);
2600 error_ = kFailureErrorCode;
2601 }
Ian Rogers0399dde2012-06-06 17:09:28 -07002602 break;
2603 case JDWP::JT_ARRAY:
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002604 case JDWP::JT_CLASS_LOADER:
2605 case JDWP::JT_CLASS_OBJECT:
Ian Rogers0399dde2012-06-06 17:09:28 -07002606 case JDWP::JT_OBJECT:
2607 case JDWP::JT_STRING:
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002608 case JDWP::JT_THREAD:
2609 case JDWP::JT_THREAD_GROUP: {
Ian Rogers0399dde2012-06-06 17:09:28 -07002610 CHECK_EQ(width_, sizeof(JDWP::ObjectId));
Ian Rogersc0542af2014-09-03 16:16:56 -07002611 JDWP::JdwpError error;
2612 mirror::Object* o =
2613 gRegistry->Get<mirror::Object*>(static_cast<JDWP::ObjectId>(value_), &error);
2614 if (error != JDWP::ERR_NONE) {
2615 VLOG(jdwp) << tag_ << " object " << value_ << " is an invalid object";
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002616 error_ = JDWP::ERR_INVALID_OBJECT;
2617 } else if (!SetVReg(m, reg, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(o)),
2618 kReferenceVReg)) {
2619 VLOG(jdwp) << "failed to set " << tag_ << " object local " << reg << " = " << o;
2620 error_ = kFailureErrorCode;
Ian Rogers0399dde2012-06-06 17:09:28 -07002621 }
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002622 break;
Ian Rogers0399dde2012-06-06 17:09:28 -07002623 }
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002624 case JDWP::JT_DOUBLE: {
Ian Rogers2bcb4a42012-11-08 10:39:18 -08002625 CHECK_EQ(width_, 8U);
Sebastien Hertzc901dd72014-07-16 11:56:07 +02002626 bool success = SetVRegPair(m, reg, value_, kDoubleLoVReg, kDoubleHiVReg);
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002627 if (!success) {
Sebastien Hertzc901dd72014-07-16 11:56:07 +02002628 VLOG(jdwp) << "failed to set double local " << reg << " = " << value_;
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002629 error_ = kFailureErrorCode;
2630 }
Ian Rogers2bcb4a42012-11-08 10:39:18 -08002631 break;
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002632 }
2633 case JDWP::JT_LONG: {
Ian Rogers0399dde2012-06-06 17:09:28 -07002634 CHECK_EQ(width_, 8U);
Sebastien Hertzc901dd72014-07-16 11:56:07 +02002635 bool success = SetVRegPair(m, reg, value_, kLongLoVReg, kLongHiVReg);
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002636 if (!success) {
Sebastien Hertzc901dd72014-07-16 11:56:07 +02002637            VLOG(jdwp) << "failed to set long local " << reg << " = " << value_;
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002638 error_ = kFailureErrorCode;
2639 }
Ian Rogers0399dde2012-06-06 17:09:28 -07002640 break;
Sebastien Hertz0bcb2902014-06-17 15:52:45 +02002641 }
Ian Rogers0399dde2012-06-06 17:09:28 -07002642 default:
2643 LOG(FATAL) << "Unknown tag " << tag_;
2644 break;
2645 }
2646 return false;
2647 }
2648
2649 const JDWP::FrameId frame_id_;
2650 const int slot_;
2651 const JDWP::JdwpTag tag_;
2652 const uint64_t value_;
2653 const size_t width_;
Sebastien Hertzcb19ebf2014-03-11 15:26:35 +01002654 JDWP::JdwpError error_;
Ian Rogers0399dde2012-06-06 17:09:28 -07002655 };
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002656
2657 ScopedObjectAccessUnchecked soa(Thread::Current());
jeffhaoa77f0f62012-12-05 17:19:31 -08002658 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Ian Rogersc0542af2014-09-03 16:16:56 -07002659 JDWP::JdwpError error;
2660 Thread* thread = DecodeThread(soa, thread_id, &error);
Elliott Hughes221229c2013-01-08 18:17:50 -08002661 if (error != JDWP::ERR_NONE) {
Sebastien Hertzcb19ebf2014-03-11 15:26:35 +01002662 return error;
Elliott Hughes221229c2013-01-08 18:17:50 -08002663 }
Sebastien Hertzcb19ebf2014-03-11 15:26:35 +01002664  // TODO: check that the thread is suspended by the debugger?
Ian Rogers700a4022014-05-19 16:49:03 -07002665 std::unique_ptr<Context> context(Context::Create());
Ian Rogers7a22fa62013-01-23 12:16:16 -08002666 SetLocalVisitor visitor(thread, context.get(), frame_id, slot, tag, value, width);
Ian Rogers0399dde2012-06-06 17:09:28 -07002667 visitor.WalkStack();
Sebastien Hertzcb19ebf2014-03-11 15:26:35 +01002668 return visitor.error_;
Elliott Hughes872d4ec2011-10-21 17:07:15 -07002669}
2670
Sebastien Hertz3f52eaf2014-04-04 17:50:18 +02002671JDWP::ObjectId Dbg::GetThisObjectIdForEvent(mirror::Object* this_object) {
2672 // If 'this_object' isn't already in the registry, we know that we're not looking for it, so
2673 // there's no point adding it to the registry and burning through ids.
2674 // When registering an event request with an instance filter, we've been given an existing object
2675 // id so it must already be present in the registry when the event fires.
2676 JDWP::ObjectId this_id = 0;
2677 if (this_object != nullptr && gRegistry->Contains(this_object)) {
2678 this_id = gRegistry->Add(this_object);
2679 }
2680 return this_id;
2681}
2682
Ian Rogersef7d42f2014-01-06 12:55:46 -08002683void Dbg::PostLocationEvent(mirror::ArtMethod* m, int dex_pc, mirror::Object* this_object,
Jeff Hao579b0242013-11-18 13:16:49 -08002684 int event_flags, const JValue* return_value) {
Sebastien Hertz3f52eaf2014-04-04 17:50:18 +02002685 if (!IsDebuggerActive()) {
2686 return;
2687 }
2688 DCHECK(m != nullptr);
2689 DCHECK_EQ(m->IsStatic(), this_object == nullptr);
Elliott Hughes91bf6cd2012-02-14 17:27:48 -08002690 JDWP::JdwpLocation location;
Ian Rogersc0542af2014-09-03 16:16:56 -07002691 SetLocation(&location, m, dex_pc);
Elliott Hughes91bf6cd2012-02-14 17:27:48 -08002692
Sebastien Hertz3f52eaf2014-04-04 17:50:18 +02002693 // We need 'this' for InstanceOnly filters only.
2694 JDWP::ObjectId this_id = GetThisObjectIdForEvent(this_object);
Jeff Hao579b0242013-11-18 13:16:49 -08002695 gJdwpState->PostLocationEvent(&location, this_id, event_flags, return_value);
Elliott Hughes872d4ec2011-10-21 17:07:15 -07002696}
2697
Sebastien Hertz3f52eaf2014-04-04 17:50:18 +02002698void Dbg::PostFieldAccessEvent(mirror::ArtMethod* m, int dex_pc,
2699 mirror::Object* this_object, mirror::ArtField* f) {
2700 if (!IsDebuggerActive()) {
2701 return;
2702 }
2703 DCHECK(m != nullptr);
2704 DCHECK(f != nullptr);
2705 JDWP::JdwpLocation location;
Ian Rogersc0542af2014-09-03 16:16:56 -07002706 SetLocation(&location, m, dex_pc);
Sebastien Hertz3f52eaf2014-04-04 17:50:18 +02002707
2708 JDWP::RefTypeId type_id = gRegistry->AddRefType(f->GetDeclaringClass());
2709 JDWP::FieldId field_id = ToFieldId(f);
2710 JDWP::ObjectId this_id = gRegistry->Add(this_object);
2711
2712 gJdwpState->PostFieldEvent(&location, type_id, field_id, this_id, nullptr, false);
2713}
2714
2715void Dbg::PostFieldModificationEvent(mirror::ArtMethod* m, int dex_pc,
2716 mirror::Object* this_object, mirror::ArtField* f,
2717 const JValue* field_value) {
2718 if (!IsDebuggerActive()) {
2719 return;
2720 }
2721 DCHECK(m != nullptr);
2722 DCHECK(f != nullptr);
2723 DCHECK(field_value != nullptr);
2724 JDWP::JdwpLocation location;
Ian Rogersc0542af2014-09-03 16:16:56 -07002725 SetLocation(&location, m, dex_pc);
Sebastien Hertz3f52eaf2014-04-04 17:50:18 +02002726
2727 JDWP::RefTypeId type_id = gRegistry->AddRefType(f->GetDeclaringClass());
2728 JDWP::FieldId field_id = ToFieldId(f);
2729 JDWP::ObjectId this_id = gRegistry->Add(this_object);
2730
2731 gJdwpState->PostFieldEvent(&location, type_id, field_id, this_id, field_value, true);
2732}
2733
2734void Dbg::PostException(const ThrowLocation& throw_location,
Brian Carlstromea46f952013-07-30 01:26:50 -07002735 mirror::ArtMethod* catch_method,
Elliott Hughes64f574f2013-02-20 14:57:12 -08002736 uint32_t catch_dex_pc, mirror::Throwable* exception_object) {
Elliott Hughesc0f09332012-03-26 13:27:06 -07002737 if (!IsDebuggerActive()) {
Ian Rogers0ad5bb82011-12-07 10:16:32 -08002738 return;
2739 }
Elliott Hughes4740cdf2011-12-07 14:07:12 -08002740
Ian Rogers62d6c772013-02-27 08:32:07 -08002741 JDWP::JdwpLocation jdwp_throw_location;
Ian Rogersc0542af2014-09-03 16:16:56 -07002742 SetLocation(&jdwp_throw_location, throw_location.GetMethod(), throw_location.GetDexPc());
Elliott Hughesd07986f2011-12-06 18:27:45 -08002743 JDWP::JdwpLocation catch_location;
Ian Rogersc0542af2014-09-03 16:16:56 -07002744 SetLocation(&catch_location, catch_method, catch_dex_pc);
Elliott Hughesd07986f2011-12-06 18:27:45 -08002745
Sebastien Hertz3f52eaf2014-04-04 17:50:18 +02002746 // We need 'this' for InstanceOnly filters only.
2747 JDWP::ObjectId this_id = GetThisObjectIdForEvent(throw_location.GetThis());
Elliott Hughes64f574f2013-02-20 14:57:12 -08002748 JDWP::ObjectId exception_id = gRegistry->Add(exception_object);
2749 JDWP::RefTypeId exception_class_id = gRegistry->AddRefType(exception_object->GetClass());
Elliott Hughesd07986f2011-12-06 18:27:45 -08002750
Ian Rogers62d6c772013-02-27 08:32:07 -08002751 gJdwpState->PostException(&jdwp_throw_location, exception_id, exception_class_id, &catch_location,
2752 this_id);
Elliott Hughes872d4ec2011-10-21 17:07:15 -07002753}
2754
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08002755void Dbg::PostClassPrepare(mirror::Class* c) {
Elliott Hughesc0f09332012-03-26 13:27:06 -07002756 if (!IsDebuggerActive()) {
Elliott Hughes4740cdf2011-12-07 14:07:12 -08002757 return;
2758 }
2759
Elliott Hughes3d30d9b2011-12-07 17:35:48 -08002760 // OLD-TODO - we currently always send both "verified" and "prepared" since
Elliott Hughes4740cdf2011-12-07 14:07:12 -08002761 // debuggers seem to like that. There might be some advantage to honesty,
2762 // since the class may not yet be verified.
2763 int state = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
Sebastien Hertz4d8fd492014-03-28 16:29:41 +01002764 JDWP::JdwpTypeTag tag = GetTypeTag(c);
Ian Rogers1ff3c982014-08-12 02:30:58 -07002765 std::string temp;
2766 gJdwpState->PostClassPrepare(tag, gRegistry->Add(c), c->GetDescriptor(&temp), state);
Elliott Hughes872d4ec2011-10-21 17:07:15 -07002767}
2768
Ian Rogers62d6c772013-02-27 08:32:07 -08002769void Dbg::UpdateDebugger(Thread* thread, mirror::Object* this_object,
Sebastien Hertz8379b222014-02-24 17:38:15 +01002770 mirror::ArtMethod* m, uint32_t dex_pc,
2771 int event_flags, const JValue* return_value) {
Ian Rogers62d6c772013-02-27 08:32:07 -08002772 if (!IsDebuggerActive() || dex_pc == static_cast<uint32_t>(-2) /* fake method exit */) {
Elliott Hughes2aa2e392012-02-17 17:15:43 -08002773 return;
Elliott Hughes91bf6cd2012-02-14 17:27:48 -08002774 }
2775
Elliott Hughes86964332012-02-15 19:37:42 -08002776 if (IsBreakpoint(m, dex_pc)) {
2777 event_flags |= kBreakpoint;
Elliott Hughes91bf6cd2012-02-14 17:27:48 -08002778 }
Elliott Hughes91bf6cd2012-02-14 17:27:48 -08002779
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +01002780 // If the debugger is single-stepping one of our threads, check to
2781 // see if we're that thread and we've reached a step point.
2782 const SingleStepControl* single_step_control = thread->GetSingleStepControl();
2783 DCHECK(single_step_control != nullptr);
2784 if (single_step_control->is_active) {
2785 CHECK(!m->IsNative());
2786 if (single_step_control->step_depth == JDWP::SD_INTO) {
2787 // Step into method calls. We break when the line number
2788 // or method pointer changes. If we're in SS_MIN mode, we
2789 // always stop.
2790 if (single_step_control->method != m) {
2791 event_flags |= kSingleStep;
2792 VLOG(jdwp) << "SS new method";
2793 } else if (single_step_control->step_size == JDWP::SS_MIN) {
2794 event_flags |= kSingleStep;
2795 VLOG(jdwp) << "SS new instruction";
Sebastien Hertzbb43b432014-04-14 11:59:08 +02002796 } else if (single_step_control->ContainsDexPc(dex_pc)) {
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +01002797 event_flags |= kSingleStep;
2798 VLOG(jdwp) << "SS new line";
2799 }
2800 } else if (single_step_control->step_depth == JDWP::SD_OVER) {
2801 // Step over method calls. We break when the line number is
2802 // different and the frame depth is <= the original frame
2803 // depth. (We can't just compare on the method, because we
2804 // might get unrolled past it by an exception, and it's tricky
2805 // to identify recursion.)
2806
2807 int stack_depth = GetStackDepth(thread);
2808
2809 if (stack_depth < single_step_control->stack_depth) {
2810 // Popped up one or more frames, always trigger.
2811 event_flags |= kSingleStep;
2812 VLOG(jdwp) << "SS method pop";
2813 } else if (stack_depth == single_step_control->stack_depth) {
2814 // Same depth, see if we moved.
2815 if (single_step_control->step_size == JDWP::SS_MIN) {
Elliott Hughes86964332012-02-15 19:37:42 -08002816 event_flags |= kSingleStep;
2817 VLOG(jdwp) << "SS new instruction";
Sebastien Hertzbb43b432014-04-14 11:59:08 +02002818 } else if (single_step_control->ContainsDexPc(dex_pc)) {
Elliott Hughes2435a572012-02-17 16:07:41 -08002819 event_flags |= kSingleStep;
2820 VLOG(jdwp) << "SS new line";
Elliott Hughes91bf6cd2012-02-14 17:27:48 -08002821 }
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +01002822 }
2823 } else {
2824 CHECK_EQ(single_step_control->step_depth, JDWP::SD_OUT);
2825 // Return from the current method. We break when the frame
2826 // depth pops up.
Elliott Hughes91bf6cd2012-02-14 17:27:48 -08002827
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +01002828 // This differs from the "method exit" break in that it stops
2829 // with the PC at the next instruction in the returned-to
2830 // function, rather than the end of the returning function.
Elliott Hughes86964332012-02-15 19:37:42 -08002831
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +01002832 int stack_depth = GetStackDepth(thread);
2833 if (stack_depth < single_step_control->stack_depth) {
2834 event_flags |= kSingleStep;
2835 VLOG(jdwp) << "SS method pop";
Elliott Hughes91bf6cd2012-02-14 17:27:48 -08002836 }
2837 }
Elliott Hughes91bf6cd2012-02-14 17:27:48 -08002838 }
Elliott Hughes91bf6cd2012-02-14 17:27:48 -08002839
Elliott Hughes91bf6cd2012-02-14 17:27:48 -08002840 // If there's something interesting going on, see if it matches one
2841 // of the debugger filters.
2842 if (event_flags != 0) {
Sebastien Hertz8379b222014-02-24 17:38:15 +01002843 Dbg::PostLocationEvent(m, dex_pc, this_object, event_flags, return_value);
Elliott Hughes91bf6cd2012-02-14 17:27:48 -08002844 }
2845}
2846
Sebastien Hertz42cd43f2014-05-13 14:15:41 +02002847size_t* Dbg::GetReferenceCounterForEvent(uint32_t instrumentation_event) {
2848 switch (instrumentation_event) {
2849 case instrumentation::Instrumentation::kMethodEntered:
2850 return &method_enter_event_ref_count_;
2851 case instrumentation::Instrumentation::kMethodExited:
2852 return &method_exit_event_ref_count_;
2853 case instrumentation::Instrumentation::kDexPcMoved:
2854 return &dex_pc_change_event_ref_count_;
2855 case instrumentation::Instrumentation::kFieldRead:
2856 return &field_read_event_ref_count_;
2857 case instrumentation::Instrumentation::kFieldWritten:
2858 return &field_write_event_ref_count_;
2859 case instrumentation::Instrumentation::kExceptionCaught:
2860 return &exception_catch_event_ref_count_;
2861 default:
2862 return nullptr;
2863 }
2864}
2865
Sebastien Hertz4d25df32014-03-21 17:44:46 +01002866// Process request while all mutator threads are suspended.
2867void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) {
Sebastien Hertz138dbfc2013-12-04 18:15:25 +01002868 instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
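  // A request either toggles the debugger's instrumentation listener for one event kind,
  // deoptimizes/undeoptimizes the whole world (forcing code in and out of the interpreter),
  // or deoptimizes/undeoptimizes a single method. All variants assume every mutator thread is
  // already suspended, which ManageDeoptimization() arranges before calling this.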
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07002869 switch (request.GetKind()) {
Sebastien Hertz4d25df32014-03-21 17:44:46 +01002870 case DeoptimizationRequest::kNothing:
2871 LOG(WARNING) << "Ignoring empty deoptimization request.";
2872 break;
Sebastien Hertz42cd43f2014-05-13 14:15:41 +02002873 case DeoptimizationRequest::kRegisterForEvent:
2874 VLOG(jdwp) << StringPrintf("Add debugger as listener for instrumentation event 0x%x",
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07002875 request.InstrumentationEvent());
2876 instrumentation->AddListener(&gDebugInstrumentationListener, request.InstrumentationEvent());
2877 instrumentation_events_ |= request.InstrumentationEvent();
Sebastien Hertz42cd43f2014-05-13 14:15:41 +02002878 break;
2879 case DeoptimizationRequest::kUnregisterForEvent:
2880 VLOG(jdwp) << StringPrintf("Remove debugger as listener for instrumentation event 0x%x",
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07002881 request.InstrumentationEvent());
Sebastien Hertz42cd43f2014-05-13 14:15:41 +02002882 instrumentation->RemoveListener(&gDebugInstrumentationListener,
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07002883 request.InstrumentationEvent());
2884 instrumentation_events_ &= ~request.InstrumentationEvent();
Sebastien Hertz42cd43f2014-05-13 14:15:41 +02002885 break;
Sebastien Hertz4d25df32014-03-21 17:44:46 +01002886 case DeoptimizationRequest::kFullDeoptimization:
Sebastien Hertz7ec2f1c2014-03-27 20:06:47 +01002887 VLOG(jdwp) << "Deoptimize the world ...";
Sebastien Hertz4d25df32014-03-21 17:44:46 +01002888 instrumentation->DeoptimizeEverything();
Sebastien Hertz7ec2f1c2014-03-27 20:06:47 +01002889 VLOG(jdwp) << "Deoptimize the world DONE";
Sebastien Hertz4d25df32014-03-21 17:44:46 +01002890 break;
2891 case DeoptimizationRequest::kFullUndeoptimization:
Sebastien Hertz7ec2f1c2014-03-27 20:06:47 +01002892 VLOG(jdwp) << "Undeoptimize the world ...";
Sebastien Hertz4d25df32014-03-21 17:44:46 +01002893 instrumentation->UndeoptimizeEverything();
Sebastien Hertz7ec2f1c2014-03-27 20:06:47 +01002894 VLOG(jdwp) << "Undeoptimize the world DONE";
Sebastien Hertz4d25df32014-03-21 17:44:46 +01002895 break;
2896 case DeoptimizationRequest::kSelectiveDeoptimization:
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07002897 VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " ...";
2898 instrumentation->Deoptimize(request.Method());
2899 VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " DONE";
Sebastien Hertz4d25df32014-03-21 17:44:46 +01002900 break;
2901 case DeoptimizationRequest::kSelectiveUndeoptimization:
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07002902 VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " ...";
2903 instrumentation->Undeoptimize(request.Method());
2904 VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " DONE";
Sebastien Hertz4d25df32014-03-21 17:44:46 +01002905 break;
2906 default:
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07002907 LOG(FATAL) << "Unsupported deoptimization request kind " << request.GetKind();
Sebastien Hertz4d25df32014-03-21 17:44:46 +01002908 break;
Sebastien Hertz138dbfc2013-12-04 18:15:25 +01002909 }
Sebastien Hertz138dbfc2013-12-04 18:15:25 +01002910}
2911
Sebastien Hertz7ec2f1c2014-03-27 20:06:47 +01002912void Dbg::DelayFullUndeoptimization() {
Brian Carlstrom306db812014-09-05 13:01:41 -07002913 MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
Sebastien Hertz7ec2f1c2014-03-27 20:06:47 +01002914 ++delayed_full_undeoptimization_count_;
2915 DCHECK_LE(delayed_full_undeoptimization_count_, full_deoptimization_event_count_);
2916}
2917
2918void Dbg::ProcessDelayedFullUndeoptimizations() {
2919 // TODO: avoid taking the lock twice (once here and once in ManageDeoptimization).
2920 {
Brian Carlstrom306db812014-09-05 13:01:41 -07002921 MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
Sebastien Hertz7ec2f1c2014-03-27 20:06:47 +01002922 while (delayed_full_undeoptimization_count_ > 0) {
2923 DeoptimizationRequest req;
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07002924 req.SetKind(DeoptimizationRequest::kFullUndeoptimization);
2925 req.SetMethod(nullptr);
Sebastien Hertz7ec2f1c2014-03-27 20:06:47 +01002926 RequestDeoptimizationLocked(req);
2927 --delayed_full_undeoptimization_count_;
2928 }
2929 }
2930 ManageDeoptimization();
2931}
2932
Sebastien Hertz4d25df32014-03-21 17:44:46 +01002933void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) {
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07002934 if (req.GetKind() == DeoptimizationRequest::kNothing) {
Sebastien Hertz4d25df32014-03-21 17:44:46 +01002935 // Nothing to do.
2936 return;
2937 }
Brian Carlstrom306db812014-09-05 13:01:41 -07002938 MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
Sebastien Hertz7ec2f1c2014-03-27 20:06:47 +01002939 RequestDeoptimizationLocked(req);
2940}
2941
2942void Dbg::RequestDeoptimizationLocked(const DeoptimizationRequest& req) {
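  // Event (un)registration and full (un)deoptimization are reference-counted: a request is only
  // queued on the 0 -> 1 and 1 -> 0 transitions, so nested debugger requests don't pile up
  // redundant work. Selective (per-method) requests are always queued. The queue is drained
  // later, with all threads suspended, by ManageDeoptimization().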
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07002943 switch (req.GetKind()) {
Sebastien Hertz42cd43f2014-05-13 14:15:41 +02002944 case DeoptimizationRequest::kRegisterForEvent: {
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07002945 DCHECK_NE(req.InstrumentationEvent(), 0u);
2946 size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
Sebastien Hertz42cd43f2014-05-13 14:15:41 +02002947 CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07002948 req.InstrumentationEvent());
Sebastien Hertz42cd43f2014-05-13 14:15:41 +02002949 if (*counter == 0) {
Sebastien Hertz7d2ae432014-05-15 11:26:34 +02002950 VLOG(jdwp) << StringPrintf("Queue request #%zd to start listening to instrumentation event 0x%x",
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07002951 deoptimization_requests_.size(), req.InstrumentationEvent());
Sebastien Hertz42cd43f2014-05-13 14:15:41 +02002952 deoptimization_requests_.push_back(req);
2953 }
2954 *counter = *counter + 1;
2955 break;
2956 }
2957 case DeoptimizationRequest::kUnregisterForEvent: {
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07002958 DCHECK_NE(req.InstrumentationEvent(), 0u);
2959 size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
Sebastien Hertz42cd43f2014-05-13 14:15:41 +02002960 CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07002961 req.InstrumentationEvent());
Sebastien Hertz42cd43f2014-05-13 14:15:41 +02002962 *counter = *counter - 1;
2963 if (*counter == 0) {
Sebastien Hertz7d2ae432014-05-15 11:26:34 +02002964 VLOG(jdwp) << StringPrintf("Queue request #%zd to stop listening to instrumentation event 0x%x",
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07002965 deoptimization_requests_.size(), req.InstrumentationEvent());
Sebastien Hertz42cd43f2014-05-13 14:15:41 +02002966 deoptimization_requests_.push_back(req);
2967 }
2968 break;
2969 }
Sebastien Hertz4d25df32014-03-21 17:44:46 +01002970 case DeoptimizationRequest::kFullDeoptimization: {
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07002971 DCHECK(req.Method() == nullptr);
Sebastien Hertz4d25df32014-03-21 17:44:46 +01002972 if (full_deoptimization_event_count_ == 0) {
Sebastien Hertz7ec2f1c2014-03-27 20:06:47 +01002973 VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
2974 << " for full deoptimization";
Sebastien Hertz4d25df32014-03-21 17:44:46 +01002975 deoptimization_requests_.push_back(req);
2976 }
2977 ++full_deoptimization_event_count_;
2978 break;
2979 }
2980 case DeoptimizationRequest::kFullUndeoptimization: {
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07002981 DCHECK(req.Method() == nullptr);
Sebastien Hertze713d932014-05-15 10:48:53 +02002982 DCHECK_GT(full_deoptimization_event_count_, 0U);
Sebastien Hertz4d25df32014-03-21 17:44:46 +01002983 --full_deoptimization_event_count_;
2984 if (full_deoptimization_event_count_ == 0) {
Sebastien Hertz7ec2f1c2014-03-27 20:06:47 +01002985 VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
2986 << " for full undeoptimization";
Sebastien Hertz4d25df32014-03-21 17:44:46 +01002987 deoptimization_requests_.push_back(req);
2988 }
2989 break;
2990 }
2991 case DeoptimizationRequest::kSelectiveDeoptimization: {
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07002992 DCHECK(req.Method() != nullptr);
Sebastien Hertz7ec2f1c2014-03-27 20:06:47 +01002993 VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07002994 << " for deoptimization of " << PrettyMethod(req.Method());
Sebastien Hertz4d25df32014-03-21 17:44:46 +01002995 deoptimization_requests_.push_back(req);
2996 break;
2997 }
2998 case DeoptimizationRequest::kSelectiveUndeoptimization: {
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07002999 DCHECK(req.Method() != nullptr);
Sebastien Hertz7ec2f1c2014-03-27 20:06:47 +01003000 VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07003001 << " for undeoptimization of " << PrettyMethod(req.Method());
Sebastien Hertz4d25df32014-03-21 17:44:46 +01003002 deoptimization_requests_.push_back(req);
3003 break;
3004 }
3005 default: {
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07003006 LOG(FATAL) << "Unknown deoptimization request kind " << req.GetKind();
Sebastien Hertz4d25df32014-03-21 17:44:46 +01003007 break;
3008 }
3009 }
3010}
3011
Sebastien Hertz138dbfc2013-12-04 18:15:25 +01003012void Dbg::ManageDeoptimization() {
3013 Thread* const self = Thread::Current();
3014 {
3015 // Avoid suspend/resume if there is no pending request.
Brian Carlstrom306db812014-09-05 13:01:41 -07003016 MutexLock mu(self, *Locks::deoptimization_lock_);
Sebastien Hertz4d25df32014-03-21 17:44:46 +01003017 if (deoptimization_requests_.empty()) {
Sebastien Hertz138dbfc2013-12-04 18:15:25 +01003018 return;
3019 }
3020 }
3021 CHECK_EQ(self->GetState(), kRunnable);
3022 self->TransitionFromRunnableToSuspended(kWaitingForDeoptimization);
3023 // We need to suspend mutator threads first.
3024 Runtime* const runtime = Runtime::Current();
3025 runtime->GetThreadList()->SuspendAll();
3026 const ThreadState old_state = self->SetStateUnsafe(kRunnable);
Sebastien Hertz4d25df32014-03-21 17:44:46 +01003027 {
Brian Carlstrom306db812014-09-05 13:01:41 -07003028 MutexLock mu(self, *Locks::deoptimization_lock_);
Sebastien Hertz7ec2f1c2014-03-27 20:06:47 +01003029 size_t req_index = 0;
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07003030 for (DeoptimizationRequest& request : deoptimization_requests_) {
Sebastien Hertz7ec2f1c2014-03-27 20:06:47 +01003031 VLOG(jdwp) << "Process deoptimization request #" << req_index++;
Sebastien Hertz4d25df32014-03-21 17:44:46 +01003032 ProcessDeoptimizationRequest(request);
3033 }
3034 deoptimization_requests_.clear();
3035 }
Sebastien Hertz138dbfc2013-12-04 18:15:25 +01003036 CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
3037 runtime->GetThreadList()->ResumeAll();
3038 self->TransitionFromSuspendedToRunnable();
3039}
3040
Sebastien Hertza76a6d42014-03-20 16:40:17 +01003041static bool IsMethodPossiblyInlined(Thread* self, mirror::ArtMethod* m)
3042 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07003043 const DexFile::CodeItem* code_item = m->GetCodeItem();
Sebastien Hertza76a6d42014-03-20 16:40:17 +01003044 if (code_item == nullptr) {
3045 // TODO We should not be asked to watch location in a native or abstract method so the code item
3046 // should never be null. We could just check we never encounter this case.
3047 return false;
Sebastien Hertz138dbfc2013-12-04 18:15:25 +01003048 }
Hiroshi Yamauchidc376172014-08-22 11:13:12 -07003049 StackHandleScope<3> hs(self);
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07003050 mirror::Class* declaring_class = m->GetDeclaringClass();
3051 Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
3052 Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
Hiroshi Yamauchidc376172014-08-22 11:13:12 -07003053 Handle<mirror::ArtMethod> method(hs.NewHandle(m));
Ian Rogers7b078e82014-09-10 14:44:24 -07003054 verifier::MethodVerifier verifier(self, dex_cache->GetDexFile(), dex_cache, class_loader,
Hiroshi Yamauchidc376172014-08-22 11:13:12 -07003055 &m->GetClassDef(), code_item, m->GetDexMethodIndex(), method,
Ian Rogers46960fe2014-05-23 10:43:43 -07003056 m->GetAccessFlags(), false, true, false);
Sebastien Hertza76a6d42014-03-20 16:40:17 +01003057 // Note: we don't need to verify the method.
3058 return InlineMethodAnalyser::AnalyseMethodCode(&verifier, nullptr);
3059}
Sebastien Hertz138dbfc2013-12-04 18:15:25 +01003060
Sebastien Hertza76a6d42014-03-20 16:40:17 +01003061static const Breakpoint* FindFirstBreakpointForMethod(mirror::ArtMethod* m)
Sebastien Hertzed2be172014-08-19 15:33:43 +02003062 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07003063 for (Breakpoint& breakpoint : gBreakpoints) {
3064 if (breakpoint.Method() == m) {
Sebastien Hertza76a6d42014-03-20 16:40:17 +01003065 return &breakpoint;
3066 }
3067 }
3068 return nullptr;
3069}
3070
3071// Sanity checks all existing breakpoints on the same method.
3072static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m, bool need_full_deoptimization)
Sebastien Hertzed2be172014-08-19 15:33:43 +02003073 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
Sebastien Hertza76a6d42014-03-20 16:40:17 +01003074 if (kIsDebugBuild) {
3075 for (const Breakpoint& breakpoint : gBreakpoints) {
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07003076 CHECK_EQ(need_full_deoptimization, breakpoint.NeedFullDeoptimization());
Sebastien Hertza76a6d42014-03-20 16:40:17 +01003077 }
3078 if (need_full_deoptimization) {
3079 // We should have deoptimized everything but not "selectively" deoptimized this method.
3080 CHECK(Runtime::Current()->GetInstrumentation()->AreAllMethodsDeoptimized());
3081 CHECK(!Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
3082 } else {
3083 // We should have "selectively" deoptimized this method.
3084 // Note: while we have not deoptimized everything for this method, we may have done it for
3085 // another event.
3086 CHECK(Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
3087 }
Sebastien Hertz138dbfc2013-12-04 18:15:25 +01003088 }
Elliott Hughes872d4ec2011-10-21 17:07:15 -07003089}
3090
Sebastien Hertza76a6d42014-03-20 16:40:17 +01003091// Installs a breakpoint at the specified location. Also indicates through the deoptimization
3092// request if we need to deoptimize.
3093void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
3094 Thread* const self = Thread::Current();
Brian Carlstromea46f952013-07-30 01:26:50 -07003095 mirror::ArtMethod* m = FromMethodId(location->method_id);
Sebastien Hertza76a6d42014-03-20 16:40:17 +01003096 DCHECK(m != nullptr) << "No method for method id " << location->method_id;
Sebastien Hertz138dbfc2013-12-04 18:15:25 +01003097
Sebastien Hertzed2be172014-08-19 15:33:43 +02003098 WriterMutexLock mu(self, *Locks::breakpoint_lock_);
Sebastien Hertza76a6d42014-03-20 16:40:17 +01003099 const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m);
3100 bool need_full_deoptimization;
3101 if (existing_breakpoint == nullptr) {
3102 // There is no breakpoint on this method yet: we need to deoptimize. If this method may be
3103 // inlined, we deoptimize everything; otherwise we deoptimize only this method.
3104 need_full_deoptimization = IsMethodPossiblyInlined(self, m);
3105 if (need_full_deoptimization) {
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07003106 req->SetKind(DeoptimizationRequest::kFullDeoptimization);
3107 req->SetMethod(nullptr);
Sebastien Hertza76a6d42014-03-20 16:40:17 +01003108 } else {
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07003109 req->SetKind(DeoptimizationRequest::kSelectiveDeoptimization);
3110 req->SetMethod(m);
Sebastien Hertz138dbfc2013-12-04 18:15:25 +01003111 }
Sebastien Hertza76a6d42014-03-20 16:40:17 +01003112 } else {
3113 // There is at least one breakpoint for this method: we don't need to deoptimize.
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07003114 req->SetKind(DeoptimizationRequest::kNothing);
3115 req->SetMethod(nullptr);
Sebastien Hertza76a6d42014-03-20 16:40:17 +01003116
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07003117 need_full_deoptimization = existing_breakpoint->NeedFullDeoptimization();
Sebastien Hertza76a6d42014-03-20 16:40:17 +01003118 SanityCheckExistingBreakpoints(m, need_full_deoptimization);
Sebastien Hertz138dbfc2013-12-04 18:15:25 +01003119 }
3120
Sebastien Hertza76a6d42014-03-20 16:40:17 +01003121 gBreakpoints.push_back(Breakpoint(m, location->dex_pc, need_full_deoptimization));
3122 VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": "
3123 << gBreakpoints[gBreakpoints.size() - 1];
3124}
3125
3126// Uninstalls a breakpoint at the specified location. Also indicates through the deoptimization
3127// request if we need to undeoptimize.
3128void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
Sebastien Hertzed2be172014-08-19 15:33:43 +02003129 WriterMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
Sebastien Hertza76a6d42014-03-20 16:40:17 +01003130 mirror::ArtMethod* m = FromMethodId(location->method_id);
3131 DCHECK(m != nullptr) << "No method for method id " << location->method_id;
Sebastien Hertza76a6d42014-03-20 16:40:17 +01003132 bool need_full_deoptimization = false;
3133 for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07003134 if (gBreakpoints[i].DexPc() == location->dex_pc && gBreakpoints[i].Method() == m) {
Sebastien Hertza76a6d42014-03-20 16:40:17 +01003135 VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i];
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07003136 need_full_deoptimization = gBreakpoints[i].NeedFullDeoptimization();
Sebastien Hertza76a6d42014-03-20 16:40:17 +01003137 DCHECK_NE(need_full_deoptimization, Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
3138 gBreakpoints.erase(gBreakpoints.begin() + i);
3139 break;
3140 }
3141 }
3142 const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m);
3143 if (existing_breakpoint == nullptr) {
3144 // There is no more breakpoint on this method: we need to undeoptimize.
3145 if (need_full_deoptimization) {
3146 // This method required full deoptimization: we need to undeoptimize everything.
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07003147 req->SetKind(DeoptimizationRequest::kFullUndeoptimization);
3148 req->SetMethod(nullptr);
Sebastien Hertza76a6d42014-03-20 16:40:17 +01003149 } else {
3150 // This method required selective deoptimization: we need to undeoptimize only that method.
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07003151 req->SetKind(DeoptimizationRequest::kSelectiveUndeoptimization);
3152 req->SetMethod(m);
Sebastien Hertza76a6d42014-03-20 16:40:17 +01003153 }
3154 } else {
3155 // There is at least one breakpoint for this method: we don't need to undeoptimize.
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07003156 req->SetKind(DeoptimizationRequest::kNothing);
3157 req->SetMethod(nullptr);
Sebastien Hertza76a6d42014-03-20 16:40:17 +01003158 SanityCheckExistingBreakpoints(m, need_full_deoptimization);
Elliott Hughes86964332012-02-15 19:37:42 -08003159 }
Elliott Hughes872d4ec2011-10-21 17:07:15 -07003160}
3161
Jeff Hao449db332013-04-12 18:30:52 -07003162// Scoped utility class to suspend a thread so that we may do tasks such as walk its stack. Doesn't
3163// cause suspension if the thread is the current thread.
3164class ScopedThreadSuspension {
3165 public:
Ian Rogers33e95662013-05-20 20:29:14 -07003166 ScopedThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
Sebastien Hertz52d131d2014-03-13 16:17:40 +01003167 LOCKS_EXCLUDED(Locks::thread_list_lock_)
Ian Rogers33e95662013-05-20 20:29:14 -07003168 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
Ian Rogersf3d874c2014-07-17 18:52:42 -07003169 thread_(nullptr),
Jeff Hao449db332013-04-12 18:30:52 -07003170 error_(JDWP::ERR_NONE),
3171 self_suspend_(false),
Ian Rogers33e95662013-05-20 20:29:14 -07003172 other_suspend_(false) {
Jeff Hao449db332013-04-12 18:30:52 -07003173 ScopedObjectAccessUnchecked soa(self);
3174 {
3175 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Ian Rogersc0542af2014-09-03 16:16:56 -07003176 thread_ = DecodeThread(soa, thread_id, &error_);
Jeff Hao449db332013-04-12 18:30:52 -07003177 }
3178 if (error_ == JDWP::ERR_NONE) {
3179 if (thread_ == soa.Self()) {
3180 self_suspend_ = true;
3181 } else {
3182 soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
3183 jobject thread_peer = gRegistry->GetJObject(thread_id);
3184 bool timed_out;
Ian Rogersf3d874c2014-07-17 18:52:42 -07003185 Thread* suspended_thread;
3186 {
3187 // Take suspend thread lock to avoid races with threads trying to suspend this one.
3188 MutexLock mu(soa.Self(), *Locks::thread_list_suspend_thread_lock_);
Brian Carlstromba32de42014-08-27 23:43:46 -07003189 ThreadList* thread_list = Runtime::Current()->GetThreadList();
3190 suspended_thread = thread_list->SuspendThreadByPeer(thread_peer, true, true, &timed_out);
Ian Rogersf3d874c2014-07-17 18:52:42 -07003191 }
Jeff Hao449db332013-04-12 18:30:52 -07003192 CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kWaitingForDebuggerSuspension);
Ian Rogersf3d874c2014-07-17 18:52:42 -07003193 if (suspended_thread == nullptr) {
Jeff Hao449db332013-04-12 18:30:52 -07003194 // Thread terminated from under us while suspending.
3195 error_ = JDWP::ERR_INVALID_THREAD;
3196 } else {
3197 CHECK_EQ(suspended_thread, thread_);
3198 other_suspend_ = true;
3199 }
3200 }
3201 }
Elliott Hughes2435a572012-02-17 16:07:41 -08003202 }
Elliott Hughes86964332012-02-15 19:37:42 -08003203
Jeff Hao449db332013-04-12 18:30:52 -07003204 Thread* GetThread() const {
3205 return thread_;
3206 }
3207
3208 JDWP::JdwpError GetError() const {
3209 return error_;
3210 }
3211
3212 ~ScopedThreadSuspension() {
3213 if (other_suspend_) {
3214 Runtime::Current()->GetThreadList()->Resume(thread_, true);
3215 }
3216 }
3217
3218 private:
3219 Thread* thread_;
3220 JDWP::JdwpError error_;
3221 bool self_suspend_;
3222 bool other_suspend_;
3223};
3224
3225JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize step_size,
3226 JDWP::JdwpStepDepth step_depth) {
3227 Thread* self = Thread::Current();
3228 ScopedThreadSuspension sts(self, thread_id);
3229 if (sts.GetError() != JDWP::ERR_NONE) {
3230 return sts.GetError();
3231 }
3232
Elliott Hughes2435a572012-02-17 16:07:41 -08003233 //
3234 // Work out what Method* we're in, the current line number, and how deep the stack currently
3235 // is for step-out.
3236 //
3237
Ian Rogers0399dde2012-06-06 17:09:28 -07003238 struct SingleStepStackVisitor : public StackVisitor {
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +01003239 explicit SingleStepStackVisitor(Thread* thread, SingleStepControl* single_step_control,
3240 int32_t* line_number)
Ian Rogersb726dcb2012-09-05 08:57:23 -07003241 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
Ian Rogersc0542af2014-09-03 16:16:56 -07003242 : StackVisitor(thread, nullptr), single_step_control_(single_step_control),
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +01003243 line_number_(line_number) {
3244 DCHECK_EQ(single_step_control_, thread->GetSingleStepControl());
Ian Rogersc0542af2014-09-03 16:16:56 -07003245 single_step_control_->method = nullptr;
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +01003246 single_step_control_->stack_depth = 0;
Elliott Hughes86964332012-02-15 19:37:42 -08003247 }
Ian Rogersca190662012-06-26 15:45:57 -07003248
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003249 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
3250 // annotalysis.
3251 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +01003252 mirror::ArtMethod* m = GetMethod();
Ian Rogers0399dde2012-06-06 17:09:28 -07003253 if (!m->IsRuntimeMethod()) {
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +01003254 ++single_step_control_->stack_depth;
Ian Rogersc0542af2014-09-03 16:16:56 -07003255 if (single_step_control_->method == nullptr) {
Ian Rogersef7d42f2014-01-06 12:55:46 -08003256 mirror::DexCache* dex_cache = m->GetDeclaringClass()->GetDexCache();
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +01003257 single_step_control_->method = m;
3258 *line_number_ = -1;
Ian Rogersc0542af2014-09-03 16:16:56 -07003259 if (dex_cache != nullptr) {
Ian Rogers4445a7e2012-10-05 17:19:13 -07003260 const DexFile& dex_file = *dex_cache->GetDexFile();
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +01003261 *line_number_ = dex_file.GetLineNumFromPC(m, GetDexPc());
Elliott Hughes2435a572012-02-17 16:07:41 -08003262 }
Elliott Hughes86964332012-02-15 19:37:42 -08003263 }
3264 }
Elliott Hughes530fa002012-03-12 11:44:49 -07003265 return true;
Elliott Hughes86964332012-02-15 19:37:42 -08003266 }
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +01003267
3268 SingleStepControl* const single_step_control_;
3269 int32_t* const line_number_;
Elliott Hughes86964332012-02-15 19:37:42 -08003270 };
Jeff Hao449db332013-04-12 18:30:52 -07003271
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +01003272 Thread* const thread = sts.GetThread();
3273 SingleStepControl* const single_step_control = thread->GetSingleStepControl();
3274 DCHECK(single_step_control != nullptr);
3275 int32_t line_number = -1;
3276 SingleStepStackVisitor visitor(thread, single_step_control, &line_number);
Ian Rogers0399dde2012-06-06 17:09:28 -07003277 visitor.WalkStack();
Elliott Hughes86964332012-02-15 19:37:42 -08003278
Elliott Hughes2435a572012-02-17 16:07:41 -08003279 //
3280 // Find the dex_pc values that correspond to the current line, for line-based single-stepping.
3281 //
3282
3283 struct DebugCallbackContext {
Sebastien Hertzbb43b432014-04-14 11:59:08 +02003284 explicit DebugCallbackContext(SingleStepControl* single_step_control, int32_t line_number,
3285 const DexFile::CodeItem* code_item)
3286 : single_step_control_(single_step_control), line_number_(line_number), code_item_(code_item),
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +01003287 last_pc_valid(false), last_pc(0) {
Elliott Hughes2435a572012-02-17 16:07:41 -08003288 }
3289
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +01003290 static bool Callback(void* raw_context, uint32_t address, uint32_t line_number) {
Elliott Hughes2435a572012-02-17 16:07:41 -08003291 DebugCallbackContext* context = reinterpret_cast<DebugCallbackContext*>(raw_context);
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +01003292 if (static_cast<int32_t>(line_number) == context->line_number_) {
Elliott Hughes2435a572012-02-17 16:07:41 -08003293 if (!context->last_pc_valid) {
3294 // Everything from this address until the next line change is ours.
3295 context->last_pc = address;
3296 context->last_pc_valid = true;
3297 }
3298 // Otherwise, if we're already in a valid range for this line,
3299 // just keep going (shouldn't really happen)...
Brian Carlstrom7934ac22013-07-26 10:54:15 -07003300 } else if (context->last_pc_valid) { // and the line number is new
Elliott Hughes2435a572012-02-17 16:07:41 -08003301 // Add everything from the last entry up until here to the set
3302 for (uint32_t dex_pc = context->last_pc; dex_pc < address; ++dex_pc) {
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +01003303 context->single_step_control_->dex_pcs.insert(dex_pc);
Elliott Hughes2435a572012-02-17 16:07:41 -08003304 }
3305 context->last_pc_valid = false;
3306 }
Brian Carlstrom7934ac22013-07-26 10:54:15 -07003307 return false; // There may be multiple entries for any given line.
Elliott Hughes2435a572012-02-17 16:07:41 -08003308 }
3309
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +01003310 ~DebugCallbackContext() {
Elliott Hughes2435a572012-02-17 16:07:41 -08003311 // If the line number was the last in the position table...
3312 if (last_pc_valid) {
Sebastien Hertzbb43b432014-04-14 11:59:08 +02003313 size_t end = code_item_->insns_size_in_code_units_;
Elliott Hughes2435a572012-02-17 16:07:41 -08003314 for (uint32_t dex_pc = last_pc; dex_pc < end; ++dex_pc) {
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +01003315 single_step_control_->dex_pcs.insert(dex_pc);
Elliott Hughes2435a572012-02-17 16:07:41 -08003316 }
3317 }
3318 }
3319
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +01003320 SingleStepControl* const single_step_control_;
3321 const int32_t line_number_;
Sebastien Hertzbb43b432014-04-14 11:59:08 +02003322 const DexFile::CodeItem* const code_item_;
Elliott Hughes2435a572012-02-17 16:07:41 -08003323 bool last_pc_valid;
3324 uint32_t last_pc;
3325 };
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +01003326 single_step_control->dex_pcs.clear();
Ian Rogersef7d42f2014-01-06 12:55:46 -08003327 mirror::ArtMethod* m = single_step_control->method;
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +01003328 if (!m->IsNative()) {
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07003329 const DexFile::CodeItem* const code_item = m->GetCodeItem();
Sebastien Hertzbb43b432014-04-14 11:59:08 +02003330 DebugCallbackContext context(single_step_control, line_number, code_item);
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07003331 m->GetDexFile()->DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(),
Ian Rogersc0542af2014-09-03 16:16:56 -07003332 DebugCallbackContext::Callback, nullptr, &context);
Elliott Hughes3e2e1a22012-02-21 11:33:41 -08003333 }
Elliott Hughes2435a572012-02-17 16:07:41 -08003334
3335 //
3336 // Everything else...
3337 //
3338
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +01003339 single_step_control->step_size = step_size;
3340 single_step_control->step_depth = step_depth;
3341 single_step_control->is_active = true;
Elliott Hughes86964332012-02-15 19:37:42 -08003342
Elliott Hughes2435a572012-02-17 16:07:41 -08003343 if (VLOG_IS_ON(jdwp)) {
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +01003344 VLOG(jdwp) << "Single-step thread: " << *thread;
3345 VLOG(jdwp) << "Single-step step size: " << single_step_control->step_size;
3346 VLOG(jdwp) << "Single-step step depth: " << single_step_control->step_depth;
3347 VLOG(jdwp) << "Single-step current method: " << PrettyMethod(single_step_control->method);
3348 VLOG(jdwp) << "Single-step current line: " << line_number;
3349 VLOG(jdwp) << "Single-step current stack depth: " << single_step_control->stack_depth;
Elliott Hughes2435a572012-02-17 16:07:41 -08003350 VLOG(jdwp) << "Single-step dex_pc values:";
Sebastien Hertzbb43b432014-04-14 11:59:08 +02003351 for (uint32_t dex_pc : single_step_control->dex_pcs) {
3352 VLOG(jdwp) << StringPrintf(" %#x", dex_pc);
Elliott Hughes2435a572012-02-17 16:07:41 -08003353 }
3354 }
3355
3356 return JDWP::ERR_NONE;
Elliott Hughes872d4ec2011-10-21 17:07:15 -07003357}
3358
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +01003359void Dbg::UnconfigureStep(JDWP::ObjectId thread_id) {
3360 ScopedObjectAccessUnchecked soa(Thread::Current());
3361 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Ian Rogersc0542af2014-09-03 16:16:56 -07003362 JDWP::JdwpError error;
3363 Thread* thread = DecodeThread(soa, thread_id, &error);
Sebastien Hertz87118ed2013-11-26 17:57:18 +01003364 if (error == JDWP::ERR_NONE) {
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +01003365 SingleStepControl* single_step_control = thread->GetSingleStepControl();
3366 DCHECK(single_step_control != nullptr);
Sebastien Hertzbb43b432014-04-14 11:59:08 +02003367 single_step_control->Clear();
Sebastien Hertz61b7f1b2013-11-15 15:59:30 +01003368 }
Elliott Hughes872d4ec2011-10-21 17:07:15 -07003369}
3370
Elliott Hughes45651fd2012-02-21 15:48:20 -08003371static char JdwpTagToShortyChar(JDWP::JdwpTag tag) {
3372 switch (tag) {
3373 default:
3374 LOG(FATAL) << "unknown JDWP tag: " << PrintableChar(tag);
3375
3376 // Primitives.
3377 case JDWP::JT_BYTE: return 'B';
3378 case JDWP::JT_CHAR: return 'C';
3379 case JDWP::JT_FLOAT: return 'F';
3380 case JDWP::JT_DOUBLE: return 'D';
3381 case JDWP::JT_INT: return 'I';
3382 case JDWP::JT_LONG: return 'J';
3383 case JDWP::JT_SHORT: return 'S';
3384 case JDWP::JT_VOID: return 'V';
3385 case JDWP::JT_BOOLEAN: return 'Z';
3386
3387 // Reference types.
3388 case JDWP::JT_ARRAY:
3389 case JDWP::JT_OBJECT:
3390 case JDWP::JT_STRING:
3391 case JDWP::JT_THREAD:
3392 case JDWP::JT_THREAD_GROUP:
3393 case JDWP::JT_CLASS_LOADER:
3394 case JDWP::JT_CLASS_OBJECT:
3395 return 'L';
3396 }
3397}
3398
Elliott Hughes88d63092013-01-09 09:55:54 -08003399JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId object_id,
3400 JDWP::RefTypeId class_id, JDWP::MethodId method_id,
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003401 uint32_t arg_count, uint64_t* arg_values,
3402 JDWP::JdwpTag* arg_types, uint32_t options,
3403 JDWP::JdwpTag* pResultTag, uint64_t* pResultValue,
3404 JDWP::ObjectId* pExceptionId) {
Elliott Hughesd07986f2011-12-06 18:27:45 -08003405 ThreadList* thread_list = Runtime::Current()->GetThreadList();
3406
Ian Rogersc0542af2014-09-03 16:16:56 -07003407 Thread* targetThread = nullptr;
3408 DebugInvokeReq* req = nullptr;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003409 Thread* self = Thread::Current();
Elliott Hughesd07986f2011-12-06 18:27:45 -08003410 {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003411 ScopedObjectAccessUnchecked soa(self);
Ian Rogers50b35e22012-10-04 10:09:15 -07003412 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Ian Rogersc0542af2014-09-03 16:16:56 -07003413 JDWP::JdwpError error;
3414 targetThread = DecodeThread(soa, thread_id, &error);
Elliott Hughes221229c2013-01-08 18:17:50 -08003415 if (error != JDWP::ERR_NONE) {
3416 LOG(ERROR) << "InvokeMethod request for invalid thread id " << thread_id;
3417 return error;
Elliott Hughesd07986f2011-12-06 18:27:45 -08003418 }
3419 req = targetThread->GetInvokeReq();
3420 if (!req->ready) {
3421 LOG(ERROR) << "InvokeMethod request for thread not stopped by event: " << *targetThread;
3422 return JDWP::ERR_INVALID_THREAD;
3423 }
3424
3425 /*
3426 * We currently have a bug where we don't successfully resume the
3427 * target thread if the suspend count is too deep. We're expected to
3428 * require one "resume" for each "suspend", but when asked to execute
3429 * a method we have to resume fully and then re-suspend it back to the
3430 * same level. (The easiest way to cause this is to type "suspend"
3431 * multiple times in jdb.)
3432 *
3433 * It's unclear what this means when the event specifies "resume all"
3434 * and some threads are suspended more deeply than others. This is
3435 * a rare problem, so for now we just prevent it from hanging forever
3436 * by rejecting the method invocation request. Without this, we will
3437 * be stuck waiting on a suspended thread.
3438 */
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003439 int suspend_count;
3440 {
Ian Rogers50b35e22012-10-04 10:09:15 -07003441 MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003442 suspend_count = targetThread->GetSuspendCount();
3443 }
Elliott Hughesd07986f2011-12-06 18:27:45 -08003444 if (suspend_count > 1) {
3445 LOG(ERROR) << *targetThread << " suspend count too deep for method invocation: " << suspend_count;
Brian Carlstrom7934ac22013-07-26 10:54:15 -07003446 return JDWP::ERR_THREAD_SUSPENDED; // Probably not expected here.
Elliott Hughesd07986f2011-12-06 18:27:45 -08003447 }
3448
Ian Rogersc0542af2014-09-03 16:16:56 -07003449 mirror::Object* receiver = gRegistry->Get<mirror::Object*>(object_id, &error);
3450 if (error != JDWP::ERR_NONE) {
Elliott Hughes3f4d58f2012-02-18 20:05:37 -08003451 return JDWP::ERR_INVALID_OBJECT;
3452 }
Elliott Hughes45651fd2012-02-21 15:48:20 -08003453
Ian Rogersc0542af2014-09-03 16:16:56 -07003454 mirror::Object* thread = gRegistry->Get<mirror::Object*>(thread_id, &error);
3455 if (error != JDWP::ERR_NONE) {
Elliott Hughes3f4d58f2012-02-18 20:05:37 -08003456 return JDWP::ERR_INVALID_OBJECT;
3457 }
Elliott Hughes45651fd2012-02-21 15:48:20 -08003458 // TODO: check that 'thread' is actually a java.lang.Thread!
3459
Ian Rogersc0542af2014-09-03 16:16:56 -07003460 mirror::Class* c = DecodeClass(class_id, &error);
3461 if (c == nullptr) {
3462 return error;
Elliott Hughes3f4d58f2012-02-18 20:05:37 -08003463 }
Elliott Hughes45651fd2012-02-21 15:48:20 -08003464
Brian Carlstromea46f952013-07-30 01:26:50 -07003465 mirror::ArtMethod* m = FromMethodId(method_id);
Ian Rogersc0542af2014-09-03 16:16:56 -07003466 if (m->IsStatic() != (receiver == nullptr)) {
Elliott Hughes45651fd2012-02-21 15:48:20 -08003467 return JDWP::ERR_INVALID_METHODID;
3468 }
3469 if (m->IsStatic()) {
3470 if (m->GetDeclaringClass() != c) {
3471 return JDWP::ERR_INVALID_METHODID;
3472 }
3473 } else {
3474 if (!m->GetDeclaringClass()->IsAssignableFrom(c)) {
3475 return JDWP::ERR_INVALID_METHODID;
3476 }
3477 }
3478
3479 // Check the argument list matches the method.
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07003480 uint32_t shorty_len = 0;
3481 const char* shorty = m->GetShorty(&shorty_len);
3482 if (shorty_len - 1 != arg_count) {
Elliott Hughes45651fd2012-02-21 15:48:20 -08003483 return JDWP::ERR_ILLEGAL_ARGUMENT;
3484 }
Elliott Hughes09201632013-04-15 15:50:07 -07003485
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07003486 {
3487 StackHandleScope<3> hs(soa.Self());
3488 MethodHelper mh(hs.NewHandle(m));
3489 HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&receiver));
3490 HandleWrapper<mirror::Class> h_klass(hs.NewHandleWrapper(&c));
3491 const DexFile::TypeList* types = m->GetParameterTypeList();
3492 for (size_t i = 0; i < arg_count; ++i) {
3493 if (shorty[i + 1] != JdwpTagToShortyChar(arg_types[i])) {
Elliott Hughes09201632013-04-15 15:50:07 -07003494 return JDWP::ERR_ILLEGAL_ARGUMENT;
3495 }
3496
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07003497 if (shorty[i + 1] == 'L') {
3498 // Did we really get an argument of an appropriate reference type?
3499 mirror::Class* parameter_type = mh.GetClassFromTypeIdx(types->GetTypeItem(i).type_idx_);
Ian Rogersc0542af2014-09-03 16:16:56 -07003500 mirror::Object* argument = gRegistry->Get<mirror::Object*>(arg_values[i], &error);
3501 if (error != JDWP::ERR_NONE) {
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07003502 return JDWP::ERR_INVALID_OBJECT;
3503 }
Ian Rogersc0542af2014-09-03 16:16:56 -07003504 if (argument != nullptr && !argument->InstanceOf(parameter_type)) {
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07003505 return JDWP::ERR_ILLEGAL_ARGUMENT;
3506 }
3507
3508 // Turn the on-the-wire ObjectId into a jobject.
3509 jvalue& v = reinterpret_cast<jvalue&>(arg_values[i]);
3510 v.l = gRegistry->GetJObject(arg_values[i]);
3511 }
Elliott Hughes09201632013-04-15 15:50:07 -07003512 }
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07003513 // Update in case it moved.
3514 m = mh.GetMethod();
Elliott Hughes45651fd2012-02-21 15:48:20 -08003515 }
3516
Sebastien Hertzd38667a2013-11-25 15:43:54 +01003517 req->receiver = receiver;
3518 req->thread = thread;
3519 req->klass = c;
3520 req->method = m;
3521 req->arg_count = arg_count;
3522 req->arg_values = arg_values;
3523 req->options = options;
3524 req->invoke_needed = true;
Elliott Hughesd07986f2011-12-06 18:27:45 -08003525 }
3526
3527 // The fact that we've released the thread list lock is a bit risky --- if the thread goes
3528 // away we're sitting high and dry -- but we must release this before the ResumeAllThreads
3529 // call, and it's unwise to hold it during WaitForSuspend.
3530
3531 {
3532 /*
3533 * We change our (JDWP thread) status, which should be THREAD_RUNNING,
Elliott Hughes81ff3182012-03-23 20:35:56 -07003534 * so we can suspend for a GC if the invoke request causes us to
Elliott Hughesd07986f2011-12-06 18:27:45 -08003535 * run out of memory. It's also a good idea to change it before locking
3536 * the invokeReq mutex, although that should never be held for long.
3537 */
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003538 self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSend);
Elliott Hughesd07986f2011-12-06 18:27:45 -08003539
Elliott Hughes4dd9b4d2011-12-12 18:29:24 -08003540 VLOG(jdwp) << " Transferring control to event thread";
Elliott Hughesd07986f2011-12-06 18:27:45 -08003541 {
Sebastien Hertzd38667a2013-11-25 15:43:54 +01003542 MutexLock mu(self, req->lock);
Elliott Hughesd07986f2011-12-06 18:27:45 -08003543
3544 if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
Elliott Hughes4dd9b4d2011-12-12 18:29:24 -08003545 VLOG(jdwp) << " Resuming all threads";
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003546 thread_list->UndoDebuggerSuspensions();
Elliott Hughesd07986f2011-12-06 18:27:45 -08003547 } else {
Elliott Hughes4dd9b4d2011-12-12 18:29:24 -08003548 VLOG(jdwp) << " Resuming event thread only";
Elliott Hughesd07986f2011-12-06 18:27:45 -08003549 thread_list->Resume(targetThread, true);
3550 }
3551
3552 // Wait for the request to finish executing.
Sebastien Hertzd38667a2013-11-25 15:43:54 +01003553 while (req->invoke_needed) {
3554 req->cond.Wait(self);
Elliott Hughesd07986f2011-12-06 18:27:45 -08003555 }
3556 }
Elliott Hughes4dd9b4d2011-12-12 18:29:24 -08003557 VLOG(jdwp) << " Control has returned from event thread";
Elliott Hughesd07986f2011-12-06 18:27:45 -08003558
3559 /* wait for thread to re-suspend itself */
Brian Carlstromdf629502013-07-17 22:39:56 -07003560 SuspendThread(thread_id, false /* request_suspension */);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003561 self->TransitionFromSuspendedToRunnable();
Elliott Hughesd07986f2011-12-06 18:27:45 -08003562 }
3563
3564 /*
3565 * Suspend the threads. We waited for the target thread to suspend
3566 * itself, so all we need to do is suspend the others.
3567 *
3568 * The suspendAllThreads() call will double-suspend the event thread,
3569 * so we want to resume the target thread once to keep the books straight.
3570 */
3571 if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003572 self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
Elliott Hughes4dd9b4d2011-12-12 18:29:24 -08003573 VLOG(jdwp) << " Suspending all threads";
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003574 thread_list->SuspendAllForDebugger();
3575 self->TransitionFromSuspendedToRunnable();
Elliott Hughes4dd9b4d2011-12-12 18:29:24 -08003576 VLOG(jdwp) << " Resuming event thread to balance the count";
Elliott Hughesd07986f2011-12-06 18:27:45 -08003577 thread_list->Resume(targetThread, true);
3578 }
3579
3580 // Copy the result.
3581 *pResultTag = req->result_tag;
3582 if (IsPrimitiveTag(req->result_tag)) {
Elliott Hughesf24d3ce2012-04-11 17:43:37 -07003583 *pResultValue = req->result_value.GetJ();
Elliott Hughesd07986f2011-12-06 18:27:45 -08003584 } else {
Elliott Hughesf24d3ce2012-04-11 17:43:37 -07003585 *pResultValue = gRegistry->Add(req->result_value.GetL());
Elliott Hughesd07986f2011-12-06 18:27:45 -08003586 }
3587 *pExceptionId = req->exception;
3588 return req->error;
Elliott Hughes872d4ec2011-10-21 17:07:15 -07003589}
3590
3591void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003592 ScopedObjectAccess soa(Thread::Current());
Elliott Hughesd07986f2011-12-06 18:27:45 -08003593
Elliott Hughes81ff3182012-03-23 20:35:56 -07003594 // We can be called while an exception is pending. We need
Elliott Hughesd07986f2011-12-06 18:27:45 -08003595 // to preserve that across the method invocation.
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07003596 StackHandleScope<4> hs(soa.Self());
3597 auto old_throw_this_object = hs.NewHandle<mirror::Object>(nullptr);
3598 auto old_throw_method = hs.NewHandle<mirror::ArtMethod>(nullptr);
3599 auto old_exception = hs.NewHandle<mirror::Throwable>(nullptr);
Ian Rogers62d6c772013-02-27 08:32:07 -08003600 uint32_t old_throw_dex_pc;
Sebastien Hertz9f102032014-05-23 08:59:42 +02003601 bool old_exception_report_flag;
Ian Rogers62d6c772013-02-27 08:32:07 -08003602 {
3603 ThrowLocation old_throw_location;
3604 mirror::Throwable* old_exception_obj = soa.Self()->GetException(&old_throw_location);
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07003605 old_throw_this_object.Assign(old_throw_location.GetThis());
3606 old_throw_method.Assign(old_throw_location.GetMethod());
3607 old_exception.Assign(old_exception_obj);
Ian Rogers62d6c772013-02-27 08:32:07 -08003608 old_throw_dex_pc = old_throw_location.GetDexPc();
Sebastien Hertz9f102032014-05-23 08:59:42 +02003609 old_exception_report_flag = soa.Self()->IsExceptionReportedToInstrumentation();
Ian Rogers62d6c772013-02-27 08:32:07 -08003610 soa.Self()->ClearException();
3611 }
Elliott Hughesd07986f2011-12-06 18:27:45 -08003612
3613 // Translate the method through the vtable, unless the debugger wants to suppress it.
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07003614 Handle<mirror::ArtMethod> m(hs.NewHandle(pReq->method));
Ian Rogersc0542af2014-09-03 16:16:56 -07003615 if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver != nullptr) {
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07003616 mirror::ArtMethod* actual_method = pReq->klass->FindVirtualMethodForVirtualOrInterface(m.Get());
3617 if (actual_method != m.Get()) {
3618 VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m.Get()) << " to " << PrettyMethod(actual_method);
3619 m.Assign(actual_method);
Elliott Hughes45651fd2012-02-21 15:48:20 -08003620 }
Elliott Hughesd07986f2011-12-06 18:27:45 -08003621 }
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07003622 VLOG(jdwp) << "ExecuteMethod " << PrettyMethod(m.Get())
Sebastien Hertzd38667a2013-11-25 15:43:54 +01003623 << " receiver=" << pReq->receiver
3624 << " arg_count=" << pReq->arg_count;
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07003625 CHECK(m.Get() != nullptr);
Elliott Hughesd07986f2011-12-06 18:27:45 -08003626
3627 CHECK_EQ(sizeof(jvalue), sizeof(uint64_t));
3628
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07003629 pReq->result_value = InvokeWithJValues(soa, pReq->receiver, soa.EncodeMethod(m.Get()),
Ian Rogers53b8b092014-03-13 23:45:53 -07003630 reinterpret_cast<jvalue*>(pReq->arg_values));
Elliott Hughesd07986f2011-12-06 18:27:45 -08003631
Ian Rogersc0542af2014-09-03 16:16:56 -07003632 mirror::Throwable* exception = soa.Self()->GetException(nullptr);
Ian Rogers62d6c772013-02-27 08:32:07 -08003633 soa.Self()->ClearException();
3634 pReq->exception = gRegistry->Add(exception);
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07003635 pReq->result_tag = BasicTagFromDescriptor(m.Get()->GetShorty());
Elliott Hughesd07986f2011-12-06 18:27:45 -08003636 if (pReq->exception != 0) {
Ian Rogers62d6c772013-02-27 08:32:07 -08003637 VLOG(jdwp) << " JDWP invocation returning with exception=" << exception
3638 << " " << exception->Dump();
Elliott Hughesf24d3ce2012-04-11 17:43:37 -07003639 pReq->result_value.SetJ(0);
Elliott Hughesd07986f2011-12-06 18:27:45 -08003640 } else if (pReq->result_tag == JDWP::JT_OBJECT) {
3641 /* if no exception thrown, examine object result more closely */
Ian Rogers98379392014-02-24 16:53:16 -08003642 JDWP::JdwpTag new_tag = TagFromObject(soa, pReq->result_value.GetL());
Elliott Hughesd07986f2011-12-06 18:27:45 -08003643 if (new_tag != pReq->result_tag) {
Elliott Hughes4dd9b4d2011-12-12 18:29:24 -08003644 VLOG(jdwp) << " JDWP promoted result from " << pReq->result_tag << " to " << new_tag;
Elliott Hughesd07986f2011-12-06 18:27:45 -08003645 pReq->result_tag = new_tag;
3646 }
3647
3648 /*
3649 * Register the object. We don't actually need an ObjectId yet,
3650 * but we do need to be sure that the GC won't move or discard the
3651 * object when we switch out of RUNNING. The ObjectId conversion
3652 * will add the object to the "do not touch" list.
3653 *
3654 * We can't use the "tracked allocation" mechanism here because
3655 * the object is going to be handed off to a different thread.
3656 */
Elliott Hughesf24d3ce2012-04-11 17:43:37 -07003657 gRegistry->Add(pReq->result_value.GetL());
Elliott Hughesd07986f2011-12-06 18:27:45 -08003658 }
3659
Ian Rogersc0542af2014-09-03 16:16:56 -07003660 if (old_exception.Get() != nullptr) {
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07003661 ThrowLocation gc_safe_throw_location(old_throw_this_object.Get(), old_throw_method.Get(),
Ian Rogers62d6c772013-02-27 08:32:07 -08003662 old_throw_dex_pc);
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07003663 soa.Self()->SetException(gc_safe_throw_location, old_exception.Get());
Sebastien Hertz9f102032014-05-23 08:59:42 +02003664 soa.Self()->SetExceptionReportedToInstrumentation(old_exception_report_flag);
Elliott Hughesd07986f2011-12-06 18:27:45 -08003665 }
Elliott Hughes872d4ec2011-10-21 17:07:15 -07003666}
3667
Elliott Hughesd07986f2011-12-06 18:27:45 -08003668/*
Elliott Hughes4b9702c2013-02-20 18:13:24 -08003669 * "request" contains a full JDWP packet, possibly with multiple chunks. We
Elliott Hughesf6a1e1e2011-10-25 16:28:04 -07003670 * need to process each, accumulate the replies, and ship the whole thing
3671 * back.
3672 *
3673 * Returns "true" if we have a reply. The reply buffer is newly allocated,
3674 * and includes the chunk type/length, followed by the data.
3675 *
Elliott Hughes3d30d9b2011-12-07 17:35:48 -08003676 * OLD-TODO: we currently assume that the request and reply include a single
Elliott Hughesf6a1e1e2011-10-25 16:28:04 -07003677 * chunk. If this becomes inconvenient we will need to adapt.
3678 */
Ian Rogersc0542af2014-09-03 16:16:56 -07003679bool Dbg::DdmHandlePacket(JDWP::Request* request, uint8_t** pReplyBuf, int* pReplyLen) {
Elliott Hughesf6a1e1e2011-10-25 16:28:04 -07003680 Thread* self = Thread::Current();
3681 JNIEnv* env = self->GetJniEnv();
3682
Ian Rogersc0542af2014-09-03 16:16:56 -07003683 uint32_t type = request->ReadUnsigned32("type");
3684 uint32_t length = request->ReadUnsigned32("length");
Elliott Hughes4b9702c2013-02-20 18:13:24 -08003685
3686 // Create a byte[] corresponding to 'request'.
Ian Rogersc0542af2014-09-03 16:16:56 -07003687 size_t request_length = request->size();
Elliott Hughes4b9702c2013-02-20 18:13:24 -08003688 ScopedLocalRef<jbyteArray> dataArray(env, env->NewByteArray(request_length));
Ian Rogersc0542af2014-09-03 16:16:56 -07003689 if (dataArray.get() == nullptr) {
Elliott Hughes4b9702c2013-02-20 18:13:24 -08003690 LOG(WARNING) << "byte[] allocation failed: " << request_length;
Elliott Hughesf6a1e1e2011-10-25 16:28:04 -07003691 env->ExceptionClear();
3692 return false;
3693 }
Ian Rogersc0542af2014-09-03 16:16:56 -07003694 env->SetByteArrayRegion(dataArray.get(), 0, request_length,
3695 reinterpret_cast<const jbyte*>(request->data()));
3696 request->Skip(request_length);
Elliott Hughesf6a1e1e2011-10-25 16:28:04 -07003697
3698 // Run through and find all chunks. [Currently just find the first.]
Elliott Hughes6a5bd492011-10-28 14:33:57 -07003699 ScopedByteArrayRO contents(env, dataArray.get());
Elliott Hughes4b9702c2013-02-20 18:13:24 -08003700 if (length != request_length) {
Ian Rogersef7d42f2014-01-06 12:55:46 -08003701 LOG(WARNING) << StringPrintf("bad chunk found (len=%u pktLen=%zd)", length, request_length);
Elliott Hughesf6a1e1e2011-10-25 16:28:04 -07003702 return false;
3703 }
3704
3705 // Call "private static Chunk dispatch(int type, byte[] data, int offset, int length)".
Elliott Hugheseac76672012-05-24 21:56:51 -07003706 ScopedLocalRef<jobject> chunk(env, env->CallStaticObjectMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
3707 WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_dispatch,
Elliott Hughes4b9702c2013-02-20 18:13:24 -08003708 type, dataArray.get(), 0, length));
Elliott Hughesf6a1e1e2011-10-25 16:28:04 -07003709 if (env->ExceptionCheck()) {
3710 LOG(INFO) << StringPrintf("Exception thrown by dispatcher for 0x%08x", type);
3711 env->ExceptionDescribe();
3712 env->ExceptionClear();
3713 return false;
3714 }
3715
Ian Rogersc0542af2014-09-03 16:16:56 -07003716 if (chunk.get() == nullptr) {
Elliott Hughesf6a1e1e2011-10-25 16:28:04 -07003717 return false;
3718 }
3719
3720 /*
3721 * Pull the pieces out of the chunk. We copy the results into a
3722 * newly-allocated buffer that the caller can free. We don't want to
3723 * continue using the Chunk object because nothing has a reference to it.
3724 *
3725 * We could avoid this by returning type/data/offset/length and having
3726 * the caller be aware of the object lifetime issues, but that
Elliott Hughes81ff3182012-03-23 20:35:56 -07003727 * integrates the JDWP code more tightly into the rest of the runtime, and doesn't work
Elliott Hughesf6a1e1e2011-10-25 16:28:04 -07003728 * if we have responses for multiple chunks.
3729 *
3730 * So we're pretty much stuck with copying data around multiple times.
3731 */
Elliott Hugheseac76672012-05-24 21:56:51 -07003732 ScopedLocalRef<jbyteArray> replyData(env, reinterpret_cast<jbyteArray>(env->GetObjectField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_data)));
Elliott Hughes4b9702c2013-02-20 18:13:24 -08003733 jint offset = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_offset);
Elliott Hugheseac76672012-05-24 21:56:51 -07003734 length = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_length);
Elliott Hugheseac76672012-05-24 21:56:51 -07003735 type = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_type);
Elliott Hughesf6a1e1e2011-10-25 16:28:04 -07003736
Elliott Hughes4dd9b4d2011-12-12 18:29:24 -08003737 VLOG(jdwp) << StringPrintf("DDM reply: type=0x%08x data=%p offset=%d length=%d", type, replyData.get(), offset, length);
Ian Rogersc0542af2014-09-03 16:16:56 -07003738 if (length == 0 || replyData.get() == nullptr) {
Elliott Hughesf6a1e1e2011-10-25 16:28:04 -07003739 return false;
3740 }
3741
Elliott Hughes4b9702c2013-02-20 18:13:24 -08003742 const int kChunkHdrLen = 8;
Elliott Hughesf6a1e1e2011-10-25 16:28:04 -07003743 uint8_t* reply = new uint8_t[length + kChunkHdrLen];
Ian Rogersc0542af2014-09-03 16:16:56 -07003744 if (reply == nullptr) {
Elliott Hughesf6a1e1e2011-10-25 16:28:04 -07003745 LOG(WARNING) << "malloc failed: " << (length + kChunkHdrLen);
3746 return false;
3747 }
Elliott Hughesf7c3b662011-10-27 12:04:56 -07003748 JDWP::Set4BE(reply + 0, type);
3749 JDWP::Set4BE(reply + 4, length);
Elliott Hughes6a5bd492011-10-28 14:33:57 -07003750 env->GetByteArrayRegion(replyData.get(), offset, length, reinterpret_cast<jbyte*>(reply + kChunkHdrLen));
Elliott Hughesf6a1e1e2011-10-25 16:28:04 -07003751
3752 *pReplyBuf = reply;
3753 *pReplyLen = length + kChunkHdrLen;
3754
Elliott Hughes4b9702c2013-02-20 18:13:24 -08003755 VLOG(jdwp) << StringPrintf("dvmHandleDdm returning type=%.4s %p len=%d", reinterpret_cast<char*>(reply), reply, length);
Elliott Hughesf6a1e1e2011-10-25 16:28:04 -07003756 return true;
Elliott Hughes872d4ec2011-10-21 17:07:15 -07003757}
3758
Elliott Hughesa2155262011-11-16 16:26:58 -08003759void Dbg::DdmBroadcast(bool connect) {
Elliott Hughes4dd9b4d2011-12-12 18:29:24 -08003760 VLOG(jdwp) << "Broadcasting DDM " << (connect ? "connect" : "disconnect") << "...";
Elliott Hughes47fce012011-10-25 18:37:19 -07003761
3762 Thread* self = Thread::Current();
Ian Rogers50b35e22012-10-04 10:09:15 -07003763 if (self->GetState() != kRunnable) {
3764 LOG(ERROR) << "DDM broadcast in thread state " << self->GetState();
3765 /* try anyway? */
Elliott Hughes47fce012011-10-25 18:37:19 -07003766 }
3767
3768 JNIEnv* env = self->GetJniEnv();
Elliott Hughes47fce012011-10-25 18:37:19 -07003769 jint event = connect ? 1 /*DdmServer.CONNECTED*/ : 2 /*DdmServer.DISCONNECTED*/;
Elliott Hugheseac76672012-05-24 21:56:51 -07003770 env->CallStaticVoidMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
3771 WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_broadcast,
3772 event);
Elliott Hughes47fce012011-10-25 18:37:19 -07003773 if (env->ExceptionCheck()) {
3774 LOG(ERROR) << "DdmServer.broadcast " << event << " failed";
3775 env->ExceptionDescribe();
3776 env->ExceptionClear();
3777 }
3778}
3779
Elliott Hughes872d4ec2011-10-21 17:07:15 -07003780void Dbg::DdmConnected() {
Elliott Hughesa2155262011-11-16 16:26:58 -08003781 Dbg::DdmBroadcast(true);
Elliott Hughes872d4ec2011-10-21 17:07:15 -07003782}
3783
3784void Dbg::DdmDisconnected() {
Elliott Hughesa2155262011-11-16 16:26:58 -08003785 Dbg::DdmBroadcast(false);
Elliott Hughes47fce012011-10-25 18:37:19 -07003786 gDdmThreadNotification = false;
3787}
3788
3789/*
Elliott Hughes82188472011-11-07 18:11:48 -08003790 * Send a notification when a thread starts, stops, or changes its name.
Elliott Hughes47fce012011-10-25 18:37:19 -07003791 *
3792 * Because we broadcast the full set of threads when the notifications are
3793 * first enabled, it's possible for "thread" to be actively executing.
3794 */
Elliott Hughes82188472011-11-07 18:11:48 -08003795void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) {
Elliott Hughes47fce012011-10-25 18:37:19 -07003796 if (!gDdmThreadNotification) {
3797 return;
3798 }
3799
Elliott Hughes82188472011-11-07 18:11:48 -08003800 if (type == CHUNK_TYPE("THDE")) {
Elliott Hughes47fce012011-10-25 18:37:19 -07003801 uint8_t buf[4];
Ian Rogersd9c4fc92013-10-01 19:45:43 -07003802 JDWP::Set4BE(&buf[0], t->GetThreadId());
Elliott Hughes47fce012011-10-25 18:37:19 -07003803 Dbg::DdmSendChunk(CHUNK_TYPE("THDE"), 4, buf);
Elliott Hughes82188472011-11-07 18:11:48 -08003804 } else {
3805 CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003806 ScopedObjectAccessUnchecked soa(Thread::Current());
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07003807 StackHandleScope<1> hs(soa.Self());
3808 Handle<mirror::String> name(hs.NewHandle(t->GetThreadName(soa)));
Ian Rogersc0542af2014-09-03 16:16:56 -07003809 size_t char_count = (name.Get() != nullptr) ? name->GetLength() : 0;
3810 const jchar* chars = (name.Get() != nullptr) ? name->GetCharArray()->GetData() : nullptr;
Elliott Hughes82188472011-11-07 18:11:48 -08003811
Elliott Hughes21f32d72011-11-09 17:44:13 -08003812 std::vector<uint8_t> bytes;
Ian Rogersd9c4fc92013-10-01 19:45:43 -07003813 JDWP::Append4BE(bytes, t->GetThreadId());
Elliott Hughes545a0642011-11-08 19:10:03 -08003814 JDWP::AppendUtf16BE(bytes, chars, char_count);
Elliott Hughes21f32d72011-11-09 17:44:13 -08003815 CHECK_EQ(bytes.size(), char_count*2 + sizeof(uint32_t)*2);
3816 Dbg::DdmSendChunk(type, bytes);
Elliott Hughes47fce012011-10-25 18:37:19 -07003817 }
3818}
3819
Elliott Hughes47fce012011-10-25 18:37:19 -07003820void Dbg::DdmSetThreadNotification(bool enable) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003821 // Enable/disable thread notifications.
Elliott Hughes47fce012011-10-25 18:37:19 -07003822 gDdmThreadNotification = enable;
3823 if (enable) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003824 // Suspend the VM then post thread start notifications for all threads. Threads attaching will
3825 // see a suspension in progress and block until that ends. They then post their own start
3826 // notification.
3827 SuspendVM();
3828 std::list<Thread*> threads;
Ian Rogers50b35e22012-10-04 10:09:15 -07003829 Thread* self = Thread::Current();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003830 {
Ian Rogers50b35e22012-10-04 10:09:15 -07003831 MutexLock mu(self, *Locks::thread_list_lock_);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003832 threads = Runtime::Current()->GetThreadList()->GetList();
3833 }
3834 {
Ian Rogers50b35e22012-10-04 10:09:15 -07003835 ScopedObjectAccess soa(self);
Mathieu Chartier02e25112013-08-14 16:14:24 -07003836 for (Thread* thread : threads) {
3837 Dbg::DdmSendThreadNotification(thread, CHUNK_TYPE("THCR"));
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003838 }
3839 }
3840 ResumeVM();
Elliott Hughes47fce012011-10-25 18:37:19 -07003841 }
3842}
3843
Elliott Hughesa2155262011-11-16 16:26:58 -08003844void Dbg::PostThreadStartOrStop(Thread* t, uint32_t type) {
Elliott Hughesc0f09332012-03-26 13:27:06 -07003845 if (IsDebuggerActive()) {
Mathieu Chartierdbe6f462012-09-25 16:54:50 -07003846 ScopedObjectAccessUnchecked soa(Thread::Current());
Ian Rogerscfaa4552012-11-26 21:00:08 -08003847 JDWP::ObjectId id = gRegistry->Add(t->GetPeer());
Elliott Hughes82188472011-11-07 18:11:48 -08003848 gJdwpState->PostThreadChange(id, type == CHUNK_TYPE("THCR"));
Elliott Hughes47fce012011-10-25 18:37:19 -07003849 }
Elliott Hughes82188472011-11-07 18:11:48 -08003850 Dbg::DdmSendThreadNotification(t, type);
Elliott Hughes47fce012011-10-25 18:37:19 -07003851}
3852
3853void Dbg::PostThreadStart(Thread* t) {
Elliott Hughesa2155262011-11-16 16:26:58 -08003854 Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THCR"));
Elliott Hughes47fce012011-10-25 18:37:19 -07003855}
3856
3857void Dbg::PostThreadDeath(Thread* t) {
Elliott Hughesa2155262011-11-16 16:26:58 -08003858 Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THDE"));
Elliott Hughes872d4ec2011-10-21 17:07:15 -07003859}
3860
Elliott Hughes82188472011-11-07 18:11:48 -08003861void Dbg::DdmSendChunk(uint32_t type, size_t byte_count, const uint8_t* buf) {
Ian Rogersc0542af2014-09-03 16:16:56 -07003862 CHECK(buf != nullptr);
Elliott Hughes3bb81562011-10-21 18:52:59 -07003863 iovec vec[1];
3864 vec[0].iov_base = reinterpret_cast<void*>(const_cast<uint8_t*>(buf));
3865 vec[0].iov_len = byte_count;
3866 Dbg::DdmSendChunkV(type, vec, 1);
Elliott Hughes872d4ec2011-10-21 17:07:15 -07003867}
3868
Elliott Hughes21f32d72011-11-09 17:44:13 -08003869void Dbg::DdmSendChunk(uint32_t type, const std::vector<uint8_t>& bytes) {
3870 DdmSendChunk(type, bytes.size(), &bytes[0]);
3871}
3872
Brian Carlstromf5293522013-07-19 00:24:00 -07003873void Dbg::DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) {
Ian Rogersc0542af2014-09-03 16:16:56 -07003874 if (gJdwpState == nullptr) {
Elliott Hughes4dd9b4d2011-12-12 18:29:24 -08003875 VLOG(jdwp) << "Debugger thread not active, ignoring DDM send: " << type;
Elliott Hughes3bb81562011-10-21 18:52:59 -07003876 } else {
Elliott Hughescccd84f2011-12-05 16:51:54 -08003877 gJdwpState->DdmSendChunkV(type, iov, iov_count);
Elliott Hughes3bb81562011-10-21 18:52:59 -07003878 }
Elliott Hughes872d4ec2011-10-21 17:07:15 -07003879}
3880
Elliott Hughes767a1472011-10-26 18:49:02 -07003881int Dbg::DdmHandleHpifChunk(HpifWhen when) {
3882 if (when == HPIF_WHEN_NOW) {
Elliott Hughes7162ad92011-10-27 14:08:42 -07003883 DdmSendHeapInfo(when);
Elliott Hughes767a1472011-10-26 18:49:02 -07003884 return true;
3885 }
3886
3887 if (when != HPIF_WHEN_NEVER && when != HPIF_WHEN_NEXT_GC && when != HPIF_WHEN_EVERY_GC) {
3888 LOG(ERROR) << "invalid HpifWhen value: " << static_cast<int>(when);
3889 return false;
3890 }
3891
3892 gDdmHpifWhen = when;
3893 return true;
3894}
3895
3896bool Dbg::DdmHandleHpsgNhsgChunk(Dbg::HpsgWhen when, Dbg::HpsgWhat what, bool native) {
3897 if (when != HPSG_WHEN_NEVER && when != HPSG_WHEN_EVERY_GC) {
3898 LOG(ERROR) << "invalid HpsgWhen value: " << static_cast<int>(when);
3899 return false;
3900 }
3901
3902 if (what != HPSG_WHAT_MERGED_OBJECTS && what != HPSG_WHAT_DISTINCT_OBJECTS) {
3903 LOG(ERROR) << "invalid HpsgWhat value: " << static_cast<int>(what);
3904 return false;
3905 }
3906
3907 if (native) {
3908 gDdmNhsgWhen = when;
3909 gDdmNhsgWhat = what;
3910 } else {
3911 gDdmHpsgWhen = when;
3912 gDdmHpsgWhat = what;
3913 }
3914 return true;
3915}
3916
Elliott Hughes7162ad92011-10-27 14:08:42 -07003917void Dbg::DdmSendHeapInfo(HpifWhen reason) {
3918 // If there's a one-shot 'when', reset it.
3919 if (reason == gDdmHpifWhen) {
3920 if (gDdmHpifWhen == HPIF_WHEN_NEXT_GC) {
3921 gDdmHpifWhen = HPIF_WHEN_NEVER;
3922 }
3923 }
3924
3925 /*
3926 * Chunk HPIF (client --> server)
3927 *
3928 * Heap Info. General information about the heap,
3929 * suitable for a summary display.
3930 *
3931 * [u4]: number of heaps
3932 *
3933 * For each heap:
3934 * [u4]: heap ID
3935 * [u8]: timestamp in ms since Unix epoch
3936 * [u1]: capture reason (same as 'when' value from server)
3937 * [u4]: max heap size in bytes (-Xmx)
3938 * [u4]: current heap size in bytes
3939 * [u4]: current number of bytes allocated
3940 * [u4]: current number of objects allocated
3941 */
3942 uint8_t heap_count = 1;
Ian Rogers1d54e732013-05-02 21:10:01 -07003943 gc::Heap* heap = Runtime::Current()->GetHeap();
Elliott Hughes21f32d72011-11-09 17:44:13 -08003944 std::vector<uint8_t> bytes;
Elliott Hughes545a0642011-11-08 19:10:03 -08003945 JDWP::Append4BE(bytes, heap_count);
Brian Carlstrom7934ac22013-07-26 10:54:15 -07003946 JDWP::Append4BE(bytes, 1); // Heap id (bogus; we only have one heap).
Elliott Hughes545a0642011-11-08 19:10:03 -08003947 JDWP::Append8BE(bytes, MilliTime());
3948 JDWP::Append1BE(bytes, reason);
Brian Carlstrom7934ac22013-07-26 10:54:15 -07003949 JDWP::Append4BE(bytes, heap->GetMaxMemory()); // Max allowed heap size in bytes.
3950 JDWP::Append4BE(bytes, heap->GetTotalMemory()); // Current heap size in bytes.
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08003951 JDWP::Append4BE(bytes, heap->GetBytesAllocated());
3952 JDWP::Append4BE(bytes, heap->GetObjectsAllocated());
Elliott Hughes21f32d72011-11-09 17:44:13 -08003953 CHECK_EQ(bytes.size(), 4U + (heap_count * (4 + 8 + 1 + 4 + 4 + 4 + 4)));
3954 Dbg::DdmSendChunk(CHUNK_TYPE("HPIF"), bytes);
Elliott Hughes767a1472011-10-26 18:49:02 -07003955}
3956
Elliott Hughes6a5bd492011-10-28 14:33:57 -07003957enum HpsgSolidity {
3958 SOLIDITY_FREE = 0,
3959 SOLIDITY_HARD = 1,
3960 SOLIDITY_SOFT = 2,
3961 SOLIDITY_WEAK = 3,
3962 SOLIDITY_PHANTOM = 4,
3963 SOLIDITY_FINALIZABLE = 5,
3964 SOLIDITY_SWEEP = 6,
3965};
3966
3967enum HpsgKind {
3968 KIND_OBJECT = 0,
3969 KIND_CLASS_OBJECT = 1,
3970 KIND_ARRAY_1 = 2,
3971 KIND_ARRAY_2 = 3,
3972 KIND_ARRAY_4 = 4,
3973 KIND_ARRAY_8 = 5,
3974 KIND_UNKNOWN = 6,
3975 KIND_NATIVE = 7,
3976};
3977
3978#define HPSG_PARTIAL (1<<7)
3979#define HPSG_STATE(solidity, kind) ((uint8_t)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))
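
// Worked examples (illustrative; not part of the original source). HPSG_STATE packs the
// kind into bits 3..5 and the solidity into bits 0..2; HPSG_PARTIAL (bit 7) marks a run of
// allocation units that continues in the next descriptor byte.
static_assert(HPSG_STATE(SOLIDITY_FREE, 0) == 0x00, "free chunk");
static_assert(HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT) == 0x01, "allocated object");
static_assert(HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT) == 0x09, "class object");
static_assert(HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4) == 0x21, "array with 4-byte components");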
3980
Ian Rogers30fab402012-01-23 15:43:46 -08003981class HeapChunkContext {
3982 public:
Elliott Hughes6a5bd492011-10-28 14:33:57 -07003983  // The maximum chunk size needed to describe an entire heap is given by the formula:
3984  // (((maximum_heap_size / ALLOCATION_UNIT_SIZE) + 255) / 256) * 2
3985 HeapChunkContext(bool merge, bool native)
Ian Rogers30fab402012-01-23 15:43:46 -08003986 : buf_(16384 - 16),
3987 type_(0),
Mathieu Chartier36dab362014-07-30 14:59:56 -07003988 merge_(merge),
3989 chunk_overhead_(0) {
Elliott Hughes6a5bd492011-10-28 14:33:57 -07003990 Reset();
3991 if (native) {
Ian Rogers30fab402012-01-23 15:43:46 -08003992 type_ = CHUNK_TYPE("NHSG");
Elliott Hughes6a5bd492011-10-28 14:33:57 -07003993 } else {
Ian Rogers30fab402012-01-23 15:43:46 -08003994 type_ = merge ? CHUNK_TYPE("HPSG") : CHUNK_TYPE("HPSO");
Elliott Hughes6a5bd492011-10-28 14:33:57 -07003995 }
3996 }
3997
3998 ~HeapChunkContext() {
Ian Rogers30fab402012-01-23 15:43:46 -08003999 if (p_ > &buf_[0]) {
Elliott Hughes6a5bd492011-10-28 14:33:57 -07004000 Flush();
4001 }
4002 }
4003
Mathieu Chartier36dab362014-07-30 14:59:56 -07004004 void SetChunkOverhead(size_t chunk_overhead) {
4005 chunk_overhead_ = chunk_overhead;
4006 }
4007
4008 void ResetStartOfNextChunk() {
4009 startOfNextMemoryChunk_ = nullptr;
4010 }
4011
Elliott Hughes6a5bd492011-10-28 14:33:57 -07004012 void EnsureHeader(const void* chunk_ptr) {
Ian Rogers30fab402012-01-23 15:43:46 -08004013 if (!needHeader_) {
Elliott Hughes6a5bd492011-10-28 14:33:57 -07004014 return;
4015 }
4016
4017 // Start a new HPSx chunk.
Brian Carlstrom7934ac22013-07-26 10:54:15 -07004018 JDWP::Write4BE(&p_, 1); // Heap id (bogus; we only have one heap).
4019 JDWP::Write1BE(&p_, 8); // Size of allocation unit, in bytes.
Elliott Hughes6a5bd492011-10-28 14:33:57 -07004020
Brian Carlstrom7934ac22013-07-26 10:54:15 -07004021    JDWP::Write4BE(&p_, reinterpret_cast<uintptr_t>(chunk_ptr));  // Virtual address of segment start.
4022 JDWP::Write4BE(&p_, 0); // offset of this piece (relative to the virtual address).
Elliott Hughes6a5bd492011-10-28 14:33:57 -07004023 // [u4]: length of piece, in allocation units
4024 // We won't know this until we're done, so save the offset and stuff in a dummy value.
Ian Rogers30fab402012-01-23 15:43:46 -08004025 pieceLenField_ = p_;
4026 JDWP::Write4BE(&p_, 0x55555555);
4027 needHeader_ = false;
Elliott Hughes6a5bd492011-10-28 14:33:57 -07004028 }
4029
Ian Rogersb726dcb2012-09-05 08:57:23 -07004030 void Flush() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Ian Rogersc0542af2014-09-03 16:16:56 -07004031 if (pieceLenField_ == nullptr) {
Ian Rogersd636b062013-01-18 17:51:18 -08004032 // Flush immediately post Reset (maybe back-to-back Flush). Ignore.
4033 CHECK(needHeader_);
4034 return;
4035 }
Elliott Hughes6a5bd492011-10-28 14:33:57 -07004036 // Patch the "length of piece" field.
Ian Rogers30fab402012-01-23 15:43:46 -08004037 CHECK_LE(&buf_[0], pieceLenField_);
4038 CHECK_LE(pieceLenField_, p_);
4039 JDWP::Set4BE(pieceLenField_, totalAllocationUnits_);
Elliott Hughes6a5bd492011-10-28 14:33:57 -07004040
Ian Rogers30fab402012-01-23 15:43:46 -08004041 Dbg::DdmSendChunk(type_, p_ - &buf_[0], &buf_[0]);
Elliott Hughes6a5bd492011-10-28 14:33:57 -07004042 Reset();
4043 }
4044
Ian Rogers00f7d0e2012-07-19 15:28:27 -07004045 static void HeapChunkCallback(void* start, void* end, size_t used_bytes, void* arg)
Ian Rogersb726dcb2012-09-05 08:57:23 -07004046 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
4047 Locks::mutator_lock_) {
Ian Rogers30fab402012-01-23 15:43:46 -08004048 reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkCallback(start, end, used_bytes);
Elliott Hughesa2155262011-11-16 16:26:58 -08004049 }
4050
Elliott Hughes6a5bd492011-10-28 14:33:57 -07004051 private:
Elliott Hughesa2155262011-11-16 16:26:58 -08004052 enum { ALLOCATION_UNIT_SIZE = 8 };
4053
Elliott Hughes6a5bd492011-10-28 14:33:57 -07004054 void Reset() {
Ian Rogers30fab402012-01-23 15:43:46 -08004055 p_ = &buf_[0];
Mathieu Chartier36dab362014-07-30 14:59:56 -07004056 ResetStartOfNextChunk();
Ian Rogers30fab402012-01-23 15:43:46 -08004057 totalAllocationUnits_ = 0;
4058 needHeader_ = true;
Ian Rogersc0542af2014-09-03 16:16:56 -07004059 pieceLenField_ = nullptr;
Elliott Hughes6a5bd492011-10-28 14:33:57 -07004060 }
4061
Ian Rogers00f7d0e2012-07-19 15:28:27 -07004062 void HeapChunkCallback(void* start, void* /*end*/, size_t used_bytes)
Ian Rogersb726dcb2012-09-05 08:57:23 -07004063 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
4064 Locks::mutator_lock_) {
Ian Rogers30fab402012-01-23 15:43:46 -08004065    // Note: heap callbacks cannot manipulate the heap they are crawling over, so the following
4066    // code takes care not to allocate memory, by ensuring buf_ is already large enough.
Ian Rogers15bf2d32012-08-28 17:33:04 -07004067 if (used_bytes == 0) {
Ian Rogersc0542af2014-09-03 16:16:56 -07004068 if (start == nullptr) {
Ian Rogers15bf2d32012-08-28 17:33:04 -07004069 // Reset for start of new heap.
Ian Rogersc0542af2014-09-03 16:16:56 -07004070 startOfNextMemoryChunk_ = nullptr;
Ian Rogers15bf2d32012-08-28 17:33:04 -07004071 Flush();
4072 }
4073      // Only process in-use memory so that free-region information
4074      // also includes dlmalloc bookkeeping.
Elliott Hughesa2155262011-11-16 16:26:58 -08004075 return;
Elliott Hughesa2155262011-11-16 16:26:58 -08004076 }
4077
Ian Rogers15bf2d32012-08-28 17:33:04 -07004078    // If we're looking at the native heap, we'll just return
4079    // (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks.
4081 bool native = type_ == CHUNK_TYPE("NHSG");
4082
Mathieu Chartier36dab362014-07-30 14:59:56 -07004083 // TODO: I'm not sure using start of next chunk works well with multiple spaces. We shouldn't
4084    // count gaps in between spaces as free memory.
Ian Rogersc0542af2014-09-03 16:16:56 -07004085 if (startOfNextMemoryChunk_ != nullptr) {
Ian Rogers15bf2d32012-08-28 17:33:04 -07004086      // Transmit any pending free memory. Free native memory of more than
4087      // kMaxFreeLen is likely due to the use of mmap, so don't report it.
4088      // If there is no free memory, start a new segment.
4089 bool flush = true;
4090 if (start > startOfNextMemoryChunk_) {
4091 const size_t kMaxFreeLen = 2 * kPageSize;
4092 void* freeStart = startOfNextMemoryChunk_;
4093 void* freeEnd = start;
Brian Carlstrom2d888622013-07-18 17:02:00 -07004094 size_t freeLen = reinterpret_cast<char*>(freeEnd) - reinterpret_cast<char*>(freeStart);
Ian Rogers15bf2d32012-08-28 17:33:04 -07004095 if (!native || freeLen < kMaxFreeLen) {
4096 AppendChunk(HPSG_STATE(SOLIDITY_FREE, 0), freeStart, freeLen);
4097 flush = false;
4098 }
4099 }
4100 if (flush) {
Ian Rogersc0542af2014-09-03 16:16:56 -07004101 startOfNextMemoryChunk_ = nullptr;
Ian Rogers15bf2d32012-08-28 17:33:04 -07004102 Flush();
4103 }
4104 }
Ian Rogersef7d42f2014-01-06 12:55:46 -08004105 mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
Elliott Hughesa2155262011-11-16 16:26:58 -08004106
4107 // Determine the type of this chunk.
4108 // OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
4109 // If it's the same, we should combine them.
Ian Rogers15bf2d32012-08-28 17:33:04 -07004110 uint8_t state = ExamineObject(obj, native);
Mathieu Chartier36dab362014-07-30 14:59:56 -07004111 AppendChunk(state, start, used_bytes + chunk_overhead_);
4112 startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
Ian Rogers15bf2d32012-08-28 17:33:04 -07004113 }
Elliott Hughesa2155262011-11-16 16:26:58 -08004114
Ian Rogers15bf2d32012-08-28 17:33:04 -07004115 void AppendChunk(uint8_t state, void* ptr, size_t length)
Ian Rogersb726dcb2012-09-05 08:57:23 -07004116 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Ian Rogers15bf2d32012-08-28 17:33:04 -07004117 // Make sure there's enough room left in the buffer.
4118    // We need two bytes for every full or partial run of 256 allocation units used by the chunk,
4119    // plus 17 bytes for any header.
4120 size_t needed = (((length/ALLOCATION_UNIT_SIZE + 255) / 256) * 2) + 17;
4121 size_t bytesLeft = buf_.size() - (size_t)(p_ - &buf_[0]);
4122 if (bytesLeft < needed) {
4123 Flush();
4124 }
4125
4126 bytesLeft = buf_.size() - (size_t)(p_ - &buf_[0]);
4127 if (bytesLeft < needed) {
4128      LOG(WARNING) << "Chunk is too big to transmit (chunk_len=" << length << ", "
4129                   << "needed=" << needed << " bytes)";
4130 return;
4131 }
4132 EnsureHeader(ptr);
Elliott Hughesa2155262011-11-16 16:26:58 -08004133 // Write out the chunk description.
Ian Rogers15bf2d32012-08-28 17:33:04 -07004134 length /= ALLOCATION_UNIT_SIZE; // Convert to allocation units.
4135 totalAllocationUnits_ += length;
4136 while (length > 256) {
Ian Rogers30fab402012-01-23 15:43:46 -08004137 *p_++ = state | HPSG_PARTIAL;
4138 *p_++ = 255; // length - 1
Ian Rogers15bf2d32012-08-28 17:33:04 -07004139 length -= 256;
Elliott Hughesa2155262011-11-16 16:26:58 -08004140 }
Ian Rogers30fab402012-01-23 15:43:46 -08004141 *p_++ = state;
Ian Rogers15bf2d32012-08-28 17:33:04 -07004142 *p_++ = length - 1;
Elliott Hughes6a5bd492011-10-28 14:33:57 -07004143 }
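
  // Worked example (illustrative; not part of the original source): a 5000-byte chunk is
  // 625 allocation units. The loop above emits (state | HPSG_PARTIAL, 255) twice, covering
  // 2 * 256 = 512 units, and then writes (state, 112) for the remaining 113 units, so the
  // three two-byte descriptors together account for all 625 units.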
4144
Ian Rogersef7d42f2014-01-06 12:55:46 -08004145 uint8_t ExamineObject(mirror::Object* o, bool is_native_heap)
4146 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
Ian Rogersc0542af2014-09-03 16:16:56 -07004147 if (o == nullptr) {
Elliott Hughesa2155262011-11-16 16:26:58 -08004148 return HPSG_STATE(SOLIDITY_FREE, 0);
4149 }
Elliott Hughes6a5bd492011-10-28 14:33:57 -07004150
Elliott Hughesa2155262011-11-16 16:26:58 -08004151 // It's an allocated chunk. Figure out what it is.
Elliott Hughes6a5bd492011-10-28 14:33:57 -07004152
Elliott Hughesa2155262011-11-16 16:26:58 -08004153 // If we're looking at the native heap, we'll just return
4154 // (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks.
Ian Rogers00f7d0e2012-07-19 15:28:27 -07004155 if (is_native_heap) {
Elliott Hughesa2155262011-11-16 16:26:58 -08004156 return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
4157 }
4158
Ian Rogers5bfa60f2012-09-02 21:17:56 -07004159 if (!Runtime::Current()->GetHeap()->IsLiveObjectLocked(o)) {
Ian Rogers15bf2d32012-08-28 17:33:04 -07004160 return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07004161 }
4162
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08004163 mirror::Class* c = o->GetClass();
Ian Rogersc0542af2014-09-03 16:16:56 -07004164 if (c == nullptr) {
Elliott Hughesa2155262011-11-16 16:26:58 -08004165 // The object was probably just created but hasn't been initialized yet.
4166 return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
4167 }
4168
Mathieu Chartier590fee92013-09-13 13:46:47 -07004169 if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(c)) {
Ian Rogers15bf2d32012-08-28 17:33:04 -07004170 LOG(ERROR) << "Invalid class for managed heap object: " << o << " " << c;
Elliott Hughesa2155262011-11-16 16:26:58 -08004171 return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
4172 }
4173
4174 if (c->IsClassClass()) {
4175 return HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
4176 }
4177
4178 if (c->IsArrayClass()) {
4179 if (o->IsObjectArray()) {
4180 return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
4181 }
4182 switch (c->GetComponentSize()) {
4183 case 1: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
4184 case 2: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
4185 case 4: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
4186 case 8: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
4187 }
4188 }
4189
Elliott Hughes6a5bd492011-10-28 14:33:57 -07004190 return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
4191 }
4192
Ian Rogers30fab402012-01-23 15:43:46 -08004193 std::vector<uint8_t> buf_;
4194 uint8_t* p_;
4195 uint8_t* pieceLenField_;
Ian Rogers15bf2d32012-08-28 17:33:04 -07004196 void* startOfNextMemoryChunk_;
Ian Rogers30fab402012-01-23 15:43:46 -08004197 size_t totalAllocationUnits_;
4198 uint32_t type_;
4199 bool merge_;
4200 bool needHeader_;
Mathieu Chartier36dab362014-07-30 14:59:56 -07004201 size_t chunk_overhead_;
Ian Rogers30fab402012-01-23 15:43:46 -08004202
Elliott Hughesa2155262011-11-16 16:26:58 -08004203 DISALLOW_COPY_AND_ASSIGN(HeapChunkContext);
4204};
Elliott Hughes6a5bd492011-10-28 14:33:57 -07004205
Mathieu Chartier36dab362014-07-30 14:59:56 -07004206static void BumpPointerSpaceCallback(mirror::Object* obj, void* arg)
4207 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
4208 const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
4209 HeapChunkContext::HeapChunkCallback(
4210 obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, arg);
4211}
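
// Illustrative note (not part of the original source): bump-pointer spaces have no
// malloc-style chunk walker, so each object is reported as its own chunk, with its size
// rounded up to kObjectAlignment so that consecutive objects form a contiguous run.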
4212
Elliott Hughes6a5bd492011-10-28 14:33:57 -07004213void Dbg::DdmSendHeapSegments(bool native) {
4214 Dbg::HpsgWhen when;
4215 Dbg::HpsgWhat what;
4216 if (!native) {
4217 when = gDdmHpsgWhen;
4218 what = gDdmHpsgWhat;
4219 } else {
4220 when = gDdmNhsgWhen;
4221 what = gDdmNhsgWhat;
4222 }
4223 if (when == HPSG_WHEN_NEVER) {
4224 return;
4225 }
4226
4227 // Figure out what kind of chunks we'll be sending.
4228 CHECK(what == HPSG_WHAT_MERGED_OBJECTS || what == HPSG_WHAT_DISTINCT_OBJECTS) << static_cast<int>(what);
4229
4230 // First, send a heap start chunk.
4231 uint8_t heap_id[4];
Brian Carlstrom7934ac22013-07-26 10:54:15 -07004232 JDWP::Set4BE(&heap_id[0], 1); // Heap id (bogus; we only have one heap).
Elliott Hughes6a5bd492011-10-28 14:33:57 -07004233 Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"), sizeof(heap_id), heap_id);
4234
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07004235 Thread* self = Thread::Current();
4236
4237  // To allow the Walk/InspectAll() calls below to exclusively lock the
4238  // mutator lock, temporarily release our shared access to it by
4239  // transitioning to the suspended state.
4240 Locks::mutator_lock_->AssertSharedHeld(self);
4241 self->TransitionFromRunnableToSuspended(kSuspended);
4242
Elliott Hughes6a5bd492011-10-28 14:33:57 -07004243 // Send a series of heap segment chunks.
Elliott Hughesa2155262011-11-16 16:26:58 -08004244 HeapChunkContext context((what == HPSG_WHAT_MERGED_OBJECTS), native);
4245 if (native) {
Christopher Ferrisc4ddc042014-05-13 14:47:50 -07004246#ifdef USE_DLMALLOC
Ian Rogers1d54e732013-05-02 21:10:01 -07004247 dlmalloc_inspect_all(HeapChunkContext::HeapChunkCallback, &context);
Christopher Ferrisc4ddc042014-05-13 14:47:50 -07004248#else
4249 UNIMPLEMENTED(WARNING) << "Native heap inspection is only supported with dlmalloc";
4250#endif
Elliott Hughesa2155262011-11-16 16:26:58 -08004251 } else {
Ian Rogers1d54e732013-05-02 21:10:01 -07004252 gc::Heap* heap = Runtime::Current()->GetHeap();
Mathieu Chartier36dab362014-07-30 14:59:56 -07004253 for (const auto& space : heap->GetContinuousSpaces()) {
4254 if (space->IsDlMallocSpace()) {
4255 // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an
4256 // allocation then the first sizeof(size_t) may belong to it.
4257 context.SetChunkOverhead(sizeof(size_t));
4258 space->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
4259 } else if (space->IsRosAllocSpace()) {
4260 context.SetChunkOverhead(0);
4261 space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
4262 } else if (space->IsBumpPointerSpace()) {
4263 context.SetChunkOverhead(0);
4264 ReaderMutexLock mu(self, *Locks::mutator_lock_);
4265 WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
4266 space->AsBumpPointerSpace()->Walk(BumpPointerSpaceCallback, &context);
4267 } else {
4268 UNIMPLEMENTED(WARNING) << "Not counting objects in space " << *space;
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07004269 }
Mathieu Chartier36dab362014-07-30 14:59:56 -07004270 context.ResetStartOfNextChunk();
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07004271 }
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07004272 // Walk the large objects, these are not in the AllocSpace.
Mathieu Chartier36dab362014-07-30 14:59:56 -07004273 context.SetChunkOverhead(0);
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07004274 heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
Elliott Hughesa2155262011-11-16 16:26:58 -08004275 }
Elliott Hughes6a5bd492011-10-28 14:33:57 -07004276
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07004277  // Re-acquire shared access to the mutator lock.
4278 self->TransitionFromSuspendedToRunnable();
4279 Locks::mutator_lock_->AssertSharedHeld(self);
4280
Elliott Hughes6a5bd492011-10-28 14:33:57 -07004281 // Finally, send a heap end chunk.
4282 Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"), sizeof(heap_id), heap_id);
Elliott Hughes767a1472011-10-26 18:49:02 -07004283}
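
// Illustrative summary (not part of the original source): a heap-segment request therefore
// produces one HPST (or NHST) start chunk, zero or more HPSG/HPSO (or NHSG) segment chunks
// emitted by HeapChunkContext::Flush() as its buffer fills, and one final HPEN (or NHEN)
// end chunk.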
4284
Elliott Hughesb1a58792013-07-11 18:10:58 -07004285static size_t GetAllocTrackerMax() {
4286#ifdef HAVE_ANDROID_OS
4287 // Check whether there's a system property overriding the number of records.
4288 const char* propertyName = "dalvik.vm.allocTrackerMax";
4289 char allocRecordMaxString[PROPERTY_VALUE_MAX];
4290 if (property_get(propertyName, allocRecordMaxString, "") > 0) {
4291 char* end;
4292 size_t value = strtoul(allocRecordMaxString, &end, 10);
4293 if (*end != '\0') {
Ruben Brunk3e47a742013-09-09 17:56:07 -07004294 LOG(ERROR) << "Ignoring " << propertyName << " '" << allocRecordMaxString
4295 << "' --- invalid";
Elliott Hughesb1a58792013-07-11 18:10:58 -07004296 return kDefaultNumAllocRecords;
4297 }
4298 if (!IsPowerOfTwo(value)) {
Ruben Brunk3e47a742013-09-09 17:56:07 -07004299 LOG(ERROR) << "Ignoring " << propertyName << " '" << allocRecordMaxString
4300 << "' --- not power of two";
Elliott Hughesb1a58792013-07-11 18:10:58 -07004301 return kDefaultNumAllocRecords;
4302 }
4303 return value;
4304 }
4305#endif
4306 return kDefaultNumAllocRecords;
4307}
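
// For example (illustrative; not part of the original source), the limit can be raised on a
// device with:
//   adb shell setprop dalvik.vm.allocTrackerMax 16384
// The value must parse cleanly and be a power of two; otherwise the code above falls back to
// kDefaultNumAllocRecords.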
4308
Brian Carlstrom306db812014-09-05 13:01:41 -07004309void Dbg::SetAllocTrackingEnabled(bool enable) {
4310 Thread* self = Thread::Current();
4311 if (enable) {
Sebastien Hertzb98063a2014-03-26 10:57:20 +01004312 {
Brian Carlstrom306db812014-09-05 13:01:41 -07004313 MutexLock mu(self, *Locks::alloc_tracker_lock_);
4314 if (recent_allocation_records_ != nullptr) {
4315 return; // Already enabled, bail.
Sebastien Hertzb98063a2014-03-26 10:57:20 +01004316 }
Brian Carlstrom306db812014-09-05 13:01:41 -07004317 alloc_record_max_ = GetAllocTrackerMax();
4318 LOG(INFO) << "Enabling alloc tracker (" << alloc_record_max_ << " entries of "
4319 << kMaxAllocRecordStackDepth << " frames, taking "
4320 << PrettySize(sizeof(AllocRecord) * alloc_record_max_) << ")";
4321 DCHECK_EQ(alloc_record_head_, 0U);
4322 DCHECK_EQ(alloc_record_count_, 0U);
4323 recent_allocation_records_ = new AllocRecord[alloc_record_max_];
4324 CHECK(recent_allocation_records_ != nullptr);
Elliott Hughes545a0642011-11-08 19:10:03 -08004325 }
Ian Rogersfa824272013-11-05 16:12:57 -08004326 Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
Elliott Hughes545a0642011-11-08 19:10:03 -08004327 } else {
Sebastien Hertzb98063a2014-03-26 10:57:20 +01004328 {
Brian Carlstrom306db812014-09-05 13:01:41 -07004329 ScopedObjectAccess soa(self); // For type_cache_.Clear();
4330 MutexLock mu(self, *Locks::alloc_tracker_lock_);
4331 if (recent_allocation_records_ == nullptr) {
4332 return; // Already disabled, bail.
4333 }
Mathieu Chartier4345c462014-06-27 10:20:14 -07004334 LOG(INFO) << "Disabling alloc tracker";
Sebastien Hertzb98063a2014-03-26 10:57:20 +01004335 delete[] recent_allocation_records_;
Ian Rogersc0542af2014-09-03 16:16:56 -07004336 recent_allocation_records_ = nullptr;
Brian Carlstrom306db812014-09-05 13:01:41 -07004337 alloc_record_head_ = 0;
4338 alloc_record_count_ = 0;
Mathieu Chartier4345c462014-06-27 10:20:14 -07004339 type_cache_.Clear();
Sebastien Hertzb98063a2014-03-26 10:57:20 +01004340 }
Brian Carlstrom306db812014-09-05 13:01:41 -07004341 // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
4342 Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
Elliott Hughes545a0642011-11-08 19:10:03 -08004343 }
4344}
4345
Ian Rogers0399dde2012-06-06 17:09:28 -07004346struct AllocRecordStackVisitor : public StackVisitor {
Ian Rogers7a22fa62013-01-23 12:16:16 -08004347 AllocRecordStackVisitor(Thread* thread, AllocRecord* record)
Ian Rogersb726dcb2012-09-05 08:57:23 -07004348 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
Ian Rogersc0542af2014-09-03 16:16:56 -07004349 : StackVisitor(thread, nullptr), record(record), depth(0) {}
Elliott Hughes545a0642011-11-08 19:10:03 -08004350
Ian Rogers00f7d0e2012-07-19 15:28:27 -07004351 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
4352 // annotalysis.
4353 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
Elliott Hughes545a0642011-11-08 19:10:03 -08004354 if (depth >= kMaxAllocRecordStackDepth) {
Elliott Hughes530fa002012-03-12 11:44:49 -07004355 return false;
Elliott Hughes545a0642011-11-08 19:10:03 -08004356 }
Brian Carlstromea46f952013-07-30 01:26:50 -07004357 mirror::ArtMethod* m = GetMethod();
Ian Rogers0399dde2012-06-06 17:09:28 -07004358 if (!m->IsRuntimeMethod()) {
Hiroshi Yamauchib5a9e3d2014-06-09 12:11:20 -07004359 record->StackElement(depth)->SetMethod(m);
4360 record->StackElement(depth)->SetDexPc(GetDexPc());
Elliott Hughes530fa002012-03-12 11:44:49 -07004361 ++depth;
Elliott Hughes545a0642011-11-08 19:10:03 -08004362 }
Elliott Hughes530fa002012-03-12 11:44:49 -07004363 return true;
Elliott Hughes545a0642011-11-08 19:10:03 -08004364 }
4365
4366 ~AllocRecordStackVisitor() {
4367 // Clear out any unused stack trace elements.
4368 for (; depth < kMaxAllocRecordStackDepth; ++depth) {
Hiroshi Yamauchib5a9e3d2014-06-09 12:11:20 -07004369 record->StackElement(depth)->SetMethod(nullptr);
4370 record->StackElement(depth)->SetDexPc(0);
Elliott Hughes545a0642011-11-08 19:10:03 -08004371 }
4372 }
4373
4374 AllocRecord* record;
4375 size_t depth;
4376};
4377
Ian Rogers844506b2014-09-12 19:59:33 -07004378void Dbg::RecordAllocation(Thread* self, mirror::Class* type, size_t byte_count) {
Brian Carlstrom306db812014-09-05 13:01:41 -07004379 MutexLock mu(self, *Locks::alloc_tracker_lock_);
Ian Rogersc0542af2014-09-03 16:16:56 -07004380 if (recent_allocation_records_ == nullptr) {
Brian Carlstrom306db812014-09-05 13:01:41 -07004381 // In the process of shutting down recording, bail.
Elliott Hughes545a0642011-11-08 19:10:03 -08004382 return;
4383 }
4384
4385 // Advance and clip.
Ian Rogers719d1a32014-03-06 12:13:39 -08004386 if (++alloc_record_head_ == alloc_record_max_) {
4387 alloc_record_head_ = 0;
Elliott Hughes545a0642011-11-08 19:10:03 -08004388 }
4389
4390 // Fill in the basics.
Ian Rogers719d1a32014-03-06 12:13:39 -08004391 AllocRecord* record = &recent_allocation_records_[alloc_record_head_];
Hiroshi Yamauchib5a9e3d2014-06-09 12:11:20 -07004392 record->SetType(type);
4393 record->SetByteCount(byte_count);
4394 record->SetThinLockId(self->GetThreadId());
Elliott Hughes545a0642011-11-08 19:10:03 -08004395
4396 // Fill in the stack trace.
Ian Rogers7a22fa62013-01-23 12:16:16 -08004397 AllocRecordStackVisitor visitor(self, record);
Ian Rogers0399dde2012-06-06 17:09:28 -07004398 visitor.WalkStack();
Elliott Hughes545a0642011-11-08 19:10:03 -08004399
Ian Rogers719d1a32014-03-06 12:13:39 -08004400 if (alloc_record_count_ < alloc_record_max_) {
4401 ++alloc_record_count_;
Elliott Hughes545a0642011-11-08 19:10:03 -08004402 }
4403}
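
// Illustrative note (not part of the original source): alloc_record_head_ is advanced before
// the record is written, so it always points at the most recently written entry; once the
// buffer has wrapped, alloc_record_count_ saturates at alloc_record_max_ and older entries
// are overwritten in place.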
4404
Elliott Hughesa8f93cb2012-06-08 17:08:48 -07004405// Returns the index of the oldest live record, i.e. where iteration over the buffer starts.
4406//
Brian Carlstrom306db812014-09-05 13:01:41 -07004407// We point at the most-recently-written record, so if alloc_record_count_ is 1
Elliott Hughesa8f93cb2012-06-08 17:08:48 -07004408// we want to use the current element. Take "head+1" and subtract count
4409// from it.
4410//
4411// We need to handle underflow in our circular buffer, so we add
Brian Carlstrom306db812014-09-05 13:01:41 -07004412// alloc_record_max_ and then mask it back down.
Ian Rogers719d1a32014-03-06 12:13:39 -08004413size_t Dbg::HeadIndex() {
4414 return (Dbg::alloc_record_head_ + 1 + Dbg::alloc_record_max_ - Dbg::alloc_record_count_) &
4415 (Dbg::alloc_record_max_ - 1);
Elliott Hughes545a0642011-11-08 19:10:03 -08004416}
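
// Worked example (illustrative; not part of the original source): with alloc_record_max_ == 8,
// alloc_record_head_ == 2 and alloc_record_count_ == 3, HeadIndex() returns
// (2 + 1 + 8 - 3) & 7 == 0, so the live records occupy slots 0, 1 and 2, with slot 2 holding
// the newest allocation. With a full buffer (count == 8) it returns (2 + 1) & 7 == 3, the
// oldest slot and also the one the next allocation will overwrite.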
4417
4418void Dbg::DumpRecentAllocations() {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07004419 ScopedObjectAccess soa(Thread::Current());
Brian Carlstrom306db812014-09-05 13:01:41 -07004420 MutexLock mu(soa.Self(), *Locks::alloc_tracker_lock_);
Ian Rogersc0542af2014-09-03 16:16:56 -07004421 if (recent_allocation_records_ == nullptr) {
Elliott Hughes545a0642011-11-08 19:10:03 -08004422 LOG(INFO) << "Not recording tracked allocations";
4423 return;
4424 }
4425
4426 // "i" is the head of the list. We want to start at the end of the
4427 // list and move forward to the tail.
Elliott Hughesa8f93cb2012-06-08 17:08:48 -07004428 size_t i = HeadIndex();
Brian Carlstrom306db812014-09-05 13:01:41 -07004429 const uint16_t capped_count = CappedAllocRecordCount(Dbg::alloc_record_count_);
4430 uint16_t count = capped_count;
Elliott Hughes545a0642011-11-08 19:10:03 -08004431
Ian Rogers719d1a32014-03-06 12:13:39 -08004432 LOG(INFO) << "Tracked allocations, (head=" << alloc_record_head_ << " count=" << count << ")";
Elliott Hughes545a0642011-11-08 19:10:03 -08004433 while (count--) {
4434 AllocRecord* record = &recent_allocation_records_[i];
4435
Hiroshi Yamauchib5a9e3d2014-06-09 12:11:20 -07004436 LOG(INFO) << StringPrintf(" Thread %-2d %6zd bytes ", record->ThinLockId(), record->ByteCount())
4437 << PrettyClass(record->Type());
Elliott Hughes545a0642011-11-08 19:10:03 -08004438
4439 for (size_t stack_frame = 0; stack_frame < kMaxAllocRecordStackDepth; ++stack_frame) {
Hiroshi Yamauchib5a9e3d2014-06-09 12:11:20 -07004440 AllocRecordStackTraceElement* stack_element = record->StackElement(stack_frame);
4441 mirror::ArtMethod* m = stack_element->Method();
Ian Rogersc0542af2014-09-03 16:16:56 -07004442 if (m == nullptr) {
Elliott Hughes545a0642011-11-08 19:10:03 -08004443 break;
4444 }
Hiroshi Yamauchib5a9e3d2014-06-09 12:11:20 -07004445 LOG(INFO) << " " << PrettyMethod(m) << " line " << stack_element->LineNumber();
Elliott Hughes545a0642011-11-08 19:10:03 -08004446 }
4447
4448    // Pause periodically to help logcat catch up.
4449 if ((count % 5) == 0) {
4450 usleep(40000);
4451 }
4452
Ian Rogers719d1a32014-03-06 12:13:39 -08004453 i = (i + 1) & (alloc_record_max_ - 1);
Elliott Hughes545a0642011-11-08 19:10:03 -08004454 }
4455}
4456
4457class StringTable {
4458 public:
4459 StringTable() {
4460 }
4461
Mathieu Chartier4345c462014-06-27 10:20:14 -07004462 void Add(const std::string& str) {
4463 table_.insert(str);
4464 }
4465
4466 void Add(const char* str) {
4467 table_.insert(str);
Elliott Hughes545a0642011-11-08 19:10:03 -08004468 }
4469
Elliott Hughesa8f93cb2012-06-08 17:08:48 -07004470 size_t IndexOf(const char* s) const {
Mathieu Chartier02e25112013-08-14 16:14:24 -07004471 auto it = table_.find(s);
Elliott Hughesa8f93cb2012-06-08 17:08:48 -07004472 if (it == table_.end()) {
4473 LOG(FATAL) << "IndexOf(\"" << s << "\") failed";
4474 }
4475 return std::distance(table_.begin(), it);
Elliott Hughes545a0642011-11-08 19:10:03 -08004476 }
4477
Elliott Hughesa8f93cb2012-06-08 17:08:48 -07004478 size_t Size() const {
Elliott Hughes545a0642011-11-08 19:10:03 -08004479 return table_.size();
4480 }
4481
Elliott Hughesa8f93cb2012-06-08 17:08:48 -07004482 void WriteTo(std::vector<uint8_t>& bytes) const {
Mathieu Chartier02e25112013-08-14 16:14:24 -07004483 for (const std::string& str : table_) {
4484 const char* s = str.c_str();
Ian Rogers6d4d9fc2011-11-30 16:24:48 -08004485 size_t s_len = CountModifiedUtf8Chars(s);
Ian Rogers700a4022014-05-19 16:49:03 -07004486      std::unique_ptr<uint16_t[]> s_utf16(new uint16_t[s_len]);
Ian Rogers6d4d9fc2011-11-30 16:24:48 -08004487 ConvertModifiedUtf8ToUtf16(s_utf16.get(), s);
4488 JDWP::AppendUtf16BE(bytes, s_utf16.get(), s_len);
Elliott Hughes545a0642011-11-08 19:10:03 -08004489 }
4490 }
4491
4492 private:
Elliott Hughesa8f93cb2012-06-08 17:08:48 -07004493 std::set<std::string> table_;
Elliott Hughes545a0642011-11-08 19:10:03 -08004494 DISALLOW_COPY_AND_ASSIGN(StringTable);
4495};
4496
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07004497static const char* GetMethodSourceFile(mirror::ArtMethod* method)
Sebastien Hertz280286a2014-04-28 09:26:50 +02004498 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07004499 DCHECK(method != nullptr);
4500 const char* source_file = method->GetDeclaringClassSourceFile();
Sebastien Hertz280286a2014-04-28 09:26:50 +02004501 return (source_file != nullptr) ? source_file : "";
4502}
4503
Elliott Hughes545a0642011-11-08 19:10:03 -08004504/*
4505 * The data we send to DDMS contains everything we have recorded.
4506 *
4507 * Message header (all values big-endian):
4508 * (1b) message header len (to allow future expansion); includes itself
4509 * (1b) entry header len
4510 * (1b) stack frame len
4511 * (2b) number of entries
4512 * (4b) offset to string table from start of message
4513 * (2b) number of class name strings
4514 * (2b) number of method name strings
4515 * (2b) number of source file name strings
4516 * For each entry:
4517 * (4b) total allocation size
Elliott Hughes221229c2013-01-08 18:17:50 -08004518 * (2b) thread id
Elliott Hughes545a0642011-11-08 19:10:03 -08004519 * (2b) allocated object's class name index
4520 * (1b) stack depth
4521 * For each stack frame:
4522 * (2b) method's class name
4523 * (2b) method name
4524 * (2b) method source file
4525 * (2b) line number, clipped to 32767; -2 if native; -1 if no source
4526 * (xb) class name strings
4527 * (xb) method name strings
4528 * (xb) source file strings
4529 *
4530 * As with other DDM traffic, strings are sent as a 4-byte length
4531 * followed by UTF-16 data.
4532 *
4533 * We send up 16-bit unsigned indexes into string tables. In theory there
Brian Carlstrom306db812014-09-05 13:01:41 -07004534 * can be (kMaxAllocRecordStackDepth * alloc_record_max_) unique strings in
Elliott Hughes545a0642011-11-08 19:10:03 -08004535 * each table, but in practice there should be far fewer.
4536 *
4537 * The chief reason for using a string table here is to keep the size of
4538 * the DDMS message to a minimum. This is partly to make the protocol
4539 * efficient, but also because we have to form the whole thing up all at
4540 * once in a memory buffer.
4541 *
4542 * We use separate string tables for class names, method names, and source
4543 * files to keep the indexes small. There will generally be no overlap
4544 * between the contents of these tables.
4545 */
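// Worked sizes (illustrative; not part of the original source): the message header is
// 1 + 1 + 1 + 2 + 4 + 2 + 2 + 2 = 15 bytes, each entry header is 4 + 2 + 2 + 1 = 9 bytes,
// and each stack frame is 2 + 2 + 2 + 2 = 8 bytes, matching kMessageHeaderLen,
// kEntryHeaderLen and kStackFrameLen in the code below.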
4546jbyteArray Dbg::GetRecentAllocations() {
4547 if (false) {
4548 DumpRecentAllocations();
4549 }
4550
Ian Rogers50b35e22012-10-04 10:09:15 -07004551 Thread* self = Thread::Current();
Elliott Hughes545a0642011-11-08 19:10:03 -08004552 std::vector<uint8_t> bytes;
Mathieu Chartier46e811b2013-07-10 17:09:14 -07004553 {
Brian Carlstrom306db812014-09-05 13:01:41 -07004554 MutexLock mu(self, *Locks::alloc_tracker_lock_);
Mathieu Chartier46e811b2013-07-10 17:09:14 -07004555 //
4556 // Part 1: generate string tables.
4557 //
4558 StringTable class_names;
4559 StringTable method_names;
4560 StringTable filenames;
Elliott Hughes545a0642011-11-08 19:10:03 -08004561
Brian Carlstrom306db812014-09-05 13:01:41 -07004562 const uint16_t capped_count = CappedAllocRecordCount(Dbg::alloc_record_count_);
4563 uint16_t count = capped_count;
4564 size_t idx = HeadIndex();
Mathieu Chartier46e811b2013-07-10 17:09:14 -07004565 while (count--) {
4566 AllocRecord* record = &recent_allocation_records_[idx];
Ian Rogers1ff3c982014-08-12 02:30:58 -07004567 std::string temp;
4568 class_names.Add(record->Type()->GetDescriptor(&temp));
Mathieu Chartier46e811b2013-07-10 17:09:14 -07004569 for (size_t i = 0; i < kMaxAllocRecordStackDepth; i++) {
Hiroshi Yamauchib5a9e3d2014-06-09 12:11:20 -07004570 mirror::ArtMethod* m = record->StackElement(i)->Method();
Ian Rogersc0542af2014-09-03 16:16:56 -07004571 if (m != nullptr) {
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07004572 class_names.Add(m->GetDeclaringClassDescriptor());
4573 method_names.Add(m->GetName());
4574 filenames.Add(GetMethodSourceFile(m));
Mathieu Chartier46e811b2013-07-10 17:09:14 -07004575 }
4576 }
Elliott Hughes545a0642011-11-08 19:10:03 -08004577
Ian Rogers719d1a32014-03-06 12:13:39 -08004578 idx = (idx + 1) & (alloc_record_max_ - 1);
Elliott Hughes545a0642011-11-08 19:10:03 -08004579 }
4580
Brian Carlstrom306db812014-09-05 13:01:41 -07004581 LOG(INFO) << "allocation records: " << capped_count;
Mathieu Chartier46e811b2013-07-10 17:09:14 -07004582
4583 //
4584 // Part 2: Generate the output and store it in the buffer.
4585 //
4586
4587 // (1b) message header len (to allow future expansion); includes itself
4588 // (1b) entry header len
4589 // (1b) stack frame len
4590 const int kMessageHeaderLen = 15;
4591 const int kEntryHeaderLen = 9;
4592 const int kStackFrameLen = 8;
4593 JDWP::Append1BE(bytes, kMessageHeaderLen);
4594 JDWP::Append1BE(bytes, kEntryHeaderLen);
4595 JDWP::Append1BE(bytes, kStackFrameLen);
4596
4597 // (2b) number of entries
4598 // (4b) offset to string table from start of message
4599 // (2b) number of class name strings
4600 // (2b) number of method name strings
4601 // (2b) number of source file name strings
Brian Carlstrom306db812014-09-05 13:01:41 -07004602 JDWP::Append2BE(bytes, capped_count);
Mathieu Chartier46e811b2013-07-10 17:09:14 -07004603 size_t string_table_offset = bytes.size();
Brian Carlstrom7934ac22013-07-26 10:54:15 -07004604 JDWP::Append4BE(bytes, 0); // We'll patch this later...
Mathieu Chartier46e811b2013-07-10 17:09:14 -07004605 JDWP::Append2BE(bytes, class_names.Size());
4606 JDWP::Append2BE(bytes, method_names.Size());
4607 JDWP::Append2BE(bytes, filenames.Size());
4608
Mathieu Chartier46e811b2013-07-10 17:09:14 -07004609 idx = HeadIndex();
Ian Rogers1ff3c982014-08-12 02:30:58 -07004610 std::string temp;
Brian Carlstrom306db812014-09-05 13:01:41 -07004611 for (count = capped_count; count != 0; --count) {
Mathieu Chartier46e811b2013-07-10 17:09:14 -07004612 // For each entry:
4613 // (4b) total allocation size
4614 // (2b) thread id
4615 // (2b) allocated object's class name index
4616 // (1b) stack depth
4617 AllocRecord* record = &recent_allocation_records_[idx];
4618 size_t stack_depth = record->GetDepth();
Mathieu Chartierf8322842014-05-16 10:59:25 -07004619 size_t allocated_object_class_name_index =
Ian Rogers1ff3c982014-08-12 02:30:58 -07004620 class_names.IndexOf(record->Type()->GetDescriptor(&temp));
Hiroshi Yamauchib5a9e3d2014-06-09 12:11:20 -07004621 JDWP::Append4BE(bytes, record->ByteCount());
4622 JDWP::Append2BE(bytes, record->ThinLockId());
Mathieu Chartier46e811b2013-07-10 17:09:14 -07004623 JDWP::Append2BE(bytes, allocated_object_class_name_index);
4624 JDWP::Append1BE(bytes, stack_depth);
4625
Mathieu Chartier46e811b2013-07-10 17:09:14 -07004626 for (size_t stack_frame = 0; stack_frame < stack_depth; ++stack_frame) {
4627 // For each stack frame:
4628 // (2b) method's class name
4629 // (2b) method name
4630 // (2b) method source file
4631 // (2b) line number, clipped to 32767; -2 if native; -1 if no source
Hiroshi Yamauchib5a9e3d2014-06-09 12:11:20 -07004632 mirror::ArtMethod* m = record->StackElement(stack_frame)->Method();
Mathieu Chartierbfd9a432014-05-21 17:43:44 -07004633 size_t class_name_index = class_names.IndexOf(m->GetDeclaringClassDescriptor());
4634 size_t method_name_index = method_names.IndexOf(m->GetName());
4635 size_t file_name_index = filenames.IndexOf(GetMethodSourceFile(m));
Mathieu Chartier46e811b2013-07-10 17:09:14 -07004636 JDWP::Append2BE(bytes, class_name_index);
4637 JDWP::Append2BE(bytes, method_name_index);
4638 JDWP::Append2BE(bytes, file_name_index);
Hiroshi Yamauchib5a9e3d2014-06-09 12:11:20 -07004639 JDWP::Append2BE(bytes, record->StackElement(stack_frame)->LineNumber());
Mathieu Chartier46e811b2013-07-10 17:09:14 -07004640 }
Ian Rogers719d1a32014-03-06 12:13:39 -08004641 idx = (idx + 1) & (alloc_record_max_ - 1);
Mathieu Chartier46e811b2013-07-10 17:09:14 -07004642 }
4643
4644 // (xb) class name strings
4645 // (xb) method name strings
4646 // (xb) source file strings
4647 JDWP::Set4BE(&bytes[string_table_offset], bytes.size());
4648 class_names.WriteTo(bytes);
4649 method_names.WriteTo(bytes);
4650 filenames.WriteTo(bytes);
Elliott Hughes545a0642011-11-08 19:10:03 -08004651 }
Ian Rogers50b35e22012-10-04 10:09:15 -07004652 JNIEnv* env = self->GetJniEnv();
Elliott Hughes545a0642011-11-08 19:10:03 -08004653 jbyteArray result = env->NewByteArray(bytes.size());
Ian Rogersc0542af2014-09-03 16:16:56 -07004654 if (result != nullptr) {
Elliott Hughes545a0642011-11-08 19:10:03 -08004655 env->SetByteArrayRegion(result, 0, bytes.size(), reinterpret_cast<const jbyte*>(&bytes[0]));
4656 }
4657 return result;
4658}
4659
Hiroshi Yamauchi0ec17d22014-07-07 13:07:08 -07004660mirror::ArtMethod* DeoptimizationRequest::Method() const {
4661 ScopedObjectAccessUnchecked soa(Thread::Current());
4662 return soa.DecodeMethod(method_);
4663}
4664
4665void DeoptimizationRequest::SetMethod(mirror::ArtMethod* m) {
4666 ScopedObjectAccessUnchecked soa(Thread::Current());
4667 method_ = soa.EncodeMethod(m);
4668}
4669
Elliott Hughes872d4ec2011-10-21 17:07:15 -07004670} // namespace art