/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "indirect_reference_table-inl.h"

#include "base/mutator_locked_dumpable.h"
#include "base/systrace.h"
#include "base/utils.h"
#include "jni/java_vm_ext.h"
#include "jni/jni_internal.h"
#include "mirror/object-inl.h"
#include "nth_caller_visitor.h"
#include "reference_table.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"

#include <cstdlib>

namespace art {

static constexpr bool kDumpStackOnNonLocalReference = false;
static constexpr bool kDebugIRT = false;

// Maximum table size we allow.
static constexpr size_t kMaxTableSizeInBytes = 128 * MB;

const char* GetIndirectRefKindString(const IndirectRefKind& kind) {
  switch (kind) {
    case kHandleScopeOrInvalid:
      return "HandleScopeOrInvalid";
    case kLocal:
      return "Local";
    case kGlobal:
      return "Global";
    case kWeakGlobal:
      return "WeakGlobal";
  }
  return "IndirectRefKind Error";
}

void IndirectReferenceTable::AbortIfNoCheckJNI(const std::string& msg) {
  // If -Xcheck:jni is on, it'll give a more detailed error before aborting.
  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
  if (!vm->IsCheckJniEnabled()) {
    // Otherwise, we want to abort rather than hand back a bad reference.
    LOG(FATAL) << msg;
  } else {
    LOG(ERROR) << msg;
  }
}

IndirectReferenceTable::IndirectReferenceTable(size_t max_count,
                                               IndirectRefKind desired_kind,
                                               ResizableCapacity resizable,
                                               std::string* error_msg)
    : segment_state_(kIRTFirstSegment),
      kind_(desired_kind),
      max_entries_(max_count),
      current_num_holes_(0),
      resizable_(resizable) {
  CHECK(error_msg != nullptr);
  CHECK_NE(desired_kind, kHandleScopeOrInvalid);

  // Overflow and maximum check.
  CHECK_LE(max_count, kMaxTableSizeInBytes / sizeof(IrtEntry));

  const size_t table_bytes = max_count * sizeof(IrtEntry);
  table_mem_map_ = MemMap::MapAnonymous("indirect ref table",
                                        /* addr */ nullptr,
                                        table_bytes,
                                        PROT_READ | PROT_WRITE,
                                        /* low_4gb */ false,
                                        error_msg);
  if (!table_mem_map_.IsValid() && error_msg->empty()) {
    *error_msg = "Unable to map memory for indirect ref table";
  }

  if (table_mem_map_.IsValid()) {
    table_ = reinterpret_cast<IrtEntry*>(table_mem_map_.Begin());
  } else {
    table_ = nullptr;
  }
  segment_state_ = kIRTFirstSegment;
  last_known_previous_state_ = kIRTFirstSegment;
}

IndirectReferenceTable::~IndirectReferenceTable() {
}

void IndirectReferenceTable::ConstexprChecks() {
  // Use this for some assertions. They can't be put into the header as C++ wants the class
  // to be complete.

  // Check kind.
  static_assert((EncodeIndirectRefKind(kLocal) & (~kKindMask)) == 0, "Kind encoding error");
  static_assert((EncodeIndirectRefKind(kGlobal) & (~kKindMask)) == 0, "Kind encoding error");
  static_assert((EncodeIndirectRefKind(kWeakGlobal) & (~kKindMask)) == 0, "Kind encoding error");
  static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kLocal)) == kLocal,
                "Kind encoding error");
  static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kGlobal)) == kGlobal,
                "Kind encoding error");
  static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kWeakGlobal)) == kWeakGlobal,
                "Kind encoding error");

  // Check serial.
  static_assert(DecodeSerial(EncodeSerial(0u)) == 0u, "Serial encoding error");
  static_assert(DecodeSerial(EncodeSerial(1u)) == 1u, "Serial encoding error");
  static_assert(DecodeSerial(EncodeSerial(2u)) == 2u, "Serial encoding error");
  static_assert(DecodeSerial(EncodeSerial(3u)) == 3u, "Serial encoding error");

  // Table index.
  static_assert(DecodeIndex(EncodeIndex(0u)) == 0u, "Index encoding error");
  static_assert(DecodeIndex(EncodeIndex(1u)) == 1u, "Index encoding error");
  static_assert(DecodeIndex(EncodeIndex(2u)) == 2u, "Index encoding error");
  static_assert(DecodeIndex(EncodeIndex(3u)) == 3u, "Index encoding error");
}

bool IndirectReferenceTable::IsValid() const {
  return table_mem_map_.IsValid();
}

// Holes:
//
// To keep the IRT compact, we want to fill "holes" created by non-stack-discipline Add & Remove
// operation sequences. For simplicity and lower memory overhead, we do not use a free list or
// similar. Instead, we scan for holes, with the expectation that we will find holes fast as they
// are usually near the end of the table (see the header, TODO: verify this assumption). To avoid
// scans when there are no holes, the number of known holes should be tracked.
//
// A previous implementation stored the top index and the number of holes as the segment state.
// This constrains the maximum number of references to 16 bits. We want to relax this, as it
// is easy to require more references (e.g., to list all classes in large applications). Thus,
// the implicitly stack-stored state, the IRTSegmentState, is only the top index.
//
// Hole count is therefore a local property of the current segment, and needs to be recovered
// when (or after) a frame is pushed or popped. To keep JNI transitions simple (and inlineable),
// we cannot do work when the segment changes. Thus, Add and Remove need to ensure that the
// current hole count is correct.
//
// To be able to detect segment changes, we require an additional local field that can describe
// the known segment. This is last_known_previous_state_. The requirement will become clear with
// the following (partly non-trivial) cases that have to be supported:
//
// 1) Segment with holes (current_num_holes_ > 0), push new segment, add/remove reference
// 2) Segment with holes (current_num_holes_ > 0), pop segment, add/remove reference
// 3) Segment with holes (current_num_holes_ > 0), push new segment, pop segment, add/remove
//    reference
// 4) Empty segment, push new segment, create a hole, pop a segment, add/remove a reference
// 5) Base segment, push new segment, create a hole, pop a segment, push new segment, add/remove
//    reference
//
// Storing the last known *previous* state (bottom index) allows conservatively detecting all the
// segment changes above. The condition is simply that the last known state is greater than or
// equal to the current previous state, and smaller than the current state (top index). The
// condition is conservative, as it adds O(1) overhead to operations on an empty segment.
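//
// As an illustration of case 4 (the numbers here are only an example): let the base segment hold
// entries [0, 5), with top index 5 and no holes. Pushing a new segment records the cookie 5; two
// Adds raise the top index to 7; removing the entry at index 5 leaves a hole
// (current_num_holes_ == 1); popping the segment restores the top index to 5. The next Add on
// the base segment passes its cookie (0) as previous_state, and because
// last_known_previous_state_.top_index (5) >= segment_state_.top_index (5), RecoverHoles rescans
// [0, 5), finds no null entries, and resets current_num_holes_ to 0 instead of trusting the
// stale count.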

static size_t CountNullEntries(const IrtEntry* table, size_t from, size_t to) {
  size_t count = 0;
  for (size_t index = from; index != to; ++index) {
    if (table[index].GetReference()->IsNull()) {
      count++;
    }
  }
  return count;
}

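// Recomputes the hole count for the current segment when the segment may have changed since the
// last known state; see the "Holes" comment block above for the detection condition.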
void IndirectReferenceTable::RecoverHoles(IRTSegmentState prev_state) {
  if (last_known_previous_state_.top_index >= segment_state_.top_index ||
      last_known_previous_state_.top_index < prev_state.top_index) {
    const size_t top_index = segment_state_.top_index;
    size_t count = CountNullEntries(table_, prev_state.top_index, top_index);

    if (kDebugIRT) {
      LOG(INFO) << "+++ Recovered holes: "
                << " Current prev=" << prev_state.top_index
                << " Current top_index=" << top_index
                << " Old num_holes=" << current_num_holes_
                << " New num_holes=" << count;
    }

    current_num_holes_ = count;
    last_known_previous_state_ = prev_state;
  } else if (kDebugIRT) {
    LOG(INFO) << "No need to recover holes";
  }
}

ALWAYS_INLINE
static inline void CheckHoleCount(IrtEntry* table,
                                  size_t exp_num_holes,
                                  IRTSegmentState prev_state,
                                  IRTSegmentState cur_state) {
  if (kIsDebugBuild) {
    size_t count = CountNullEntries(table, prev_state.top_index, cur_state.top_index);
    CHECK_EQ(exp_num_holes, count) << "prevState=" << prev_state.top_index
                                   << " topIndex=" << cur_state.top_index;
  }
}

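// Grows the table to new_size entries: maps a new region, copies the old entries across, and
// swaps the mapping in; the old mapping is released when it is replaced.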
bool IndirectReferenceTable::Resize(size_t new_size, std::string* error_msg) {
  CHECK_GT(new_size, max_entries_);

  constexpr size_t kMaxEntries = kMaxTableSizeInBytes / sizeof(IrtEntry);
  if (new_size > kMaxEntries) {
    *error_msg = android::base::StringPrintf("Requested size exceeds maximum: %zu", new_size);
    return false;
  }
  // Note: the above check also ensures that there is no overflow below.

  const size_t table_bytes = new_size * sizeof(IrtEntry);
  MemMap new_map = MemMap::MapAnonymous("indirect ref table",
                                        /* addr */ nullptr,
                                        table_bytes,
                                        PROT_READ | PROT_WRITE,
                                        /* is_low_4gb */ false,
                                        error_msg);
  if (!new_map.IsValid()) {
    return false;
  }

  memcpy(new_map.Begin(), table_mem_map_.Begin(), table_mem_map_.Size());
  table_mem_map_ = std::move(new_map);
  table_ = reinterpret_cast<IrtEntry*>(table_mem_map_.Begin());
  max_entries_ = new_size;

  return true;
}

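// Adds obj to the current segment, filling the first free hole if there is one and appending at
// the top index otherwise; returns the encoded IndirectRef, or null (with *error_msg set) if the
// table is full and cannot be resized.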
IndirectRef IndirectReferenceTable::Add(IRTSegmentState previous_state,
                                        ObjPtr<mirror::Object> obj,
                                        std::string* error_msg) {
  if (kDebugIRT) {
    LOG(INFO) << "+++ Add: previous_state=" << previous_state.top_index
              << " top_index=" << segment_state_.top_index
              << " last_known_prev_top_index=" << last_known_previous_state_.top_index
              << " holes=" << current_num_holes_;
  }

  size_t top_index = segment_state_.top_index;

  CHECK(obj != nullptr);
  VerifyObject(obj);
  DCHECK(table_ != nullptr);

  if (top_index == max_entries_) {
    if (resizable_ == ResizableCapacity::kNo) {
      std::ostringstream oss;
      oss << "JNI ERROR (app bug): " << kind_ << " table overflow "
          << "(max=" << max_entries_ << ")"
          << MutatorLockedDumpable<IndirectReferenceTable>(*this);
      *error_msg = oss.str();
      return nullptr;
    }

    // Try to double the table size.
    if (std::numeric_limits<size_t>::max() / 2 < max_entries_) {
      std::ostringstream oss;
      oss << "JNI ERROR (app bug): " << kind_ << " table overflow "
          << "(max=" << max_entries_ << ")" << std::endl
          << MutatorLockedDumpable<IndirectReferenceTable>(*this)
          << " Resizing failed: exceeds size_t";
      *error_msg = oss.str();
      return nullptr;
    }

    std::string inner_error_msg;
    if (!Resize(max_entries_ * 2, &inner_error_msg)) {
      std::ostringstream oss;
      oss << "JNI ERROR (app bug): " << kind_ << " table overflow "
          << "(max=" << max_entries_ << ")" << std::endl
          << MutatorLockedDumpable<IndirectReferenceTable>(*this)
          << " Resizing failed: " << inner_error_msg;
      *error_msg = oss.str();
      return nullptr;
    }
  }

  RecoverHoles(previous_state);
  CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);

  // We know there's enough room in the table. Now we just need to find
  // the right spot. If there's a hole, find it and fill it; otherwise,
  // add to the end of the list.
  IndirectRef result;
  size_t index;
  if (current_num_holes_ > 0) {
    DCHECK_GT(top_index, 1U);
    // Find the first hole; likely to be near the end of the list.
    IrtEntry* p_scan = &table_[top_index - 1];
    DCHECK(!p_scan->GetReference()->IsNull());
    --p_scan;
    while (!p_scan->GetReference()->IsNull()) {
      DCHECK_GE(p_scan, table_ + previous_state.top_index);
      --p_scan;
    }
    index = p_scan - table_;
    current_num_holes_--;
  } else {
    // Add to the end.
    index = top_index++;
    segment_state_.top_index = top_index;
  }
  table_[index].Add(obj);
  result = ToIndirectRef(index);
  if (kDebugIRT) {
    LOG(INFO) << "+++ added at " << ExtractIndex(result) << " top=" << segment_state_.top_index
              << " holes=" << current_num_holes_;
  }

  DCHECK(result != nullptr);
  return result;
}
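
// A minimal usage sketch (illustrative only: it mirrors how the JNI local reference machinery
// brackets a native call; GetSegmentState() is assumed to be the header's accessor for the
// current segment state):
//
//   const IRTSegmentState cookie = table.GetSegmentState();  // Remember the segment start.
//   std::string error_msg;
//   IndirectRef ref = table.Add(cookie, obj, &error_msg);    // Fills a hole or grows the top.
//   // ... use ref ...
//   table.Remove(cookie, ref);      // Explicit single removal, leaving a hole if not topmost.
//   table.SetSegmentState(cookie);  // Or pop the segment, bulk-freeing everything added above.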

void IndirectReferenceTable::AssertEmpty() {
  for (size_t i = 0; i < Capacity(); ++i) {
    if (!table_[i].GetReference()->IsNull()) {
      LOG(FATAL) << "Internal Error: non-empty local reference table\n"
                 << MutatorLockedDumpable<IndirectReferenceTable>(*this);
      UNREACHABLE();
    }
  }
}

// Removes an object. We extract the table offset bits from "iref"
// and zap the corresponding entry, leaving a hole if it's not at the top.
// If the entry is not between the current top index and the bottom index
// specified by the cookie, we don't remove anything. This is the behavior
// required by JNI's DeleteLocalRef function.
// This method is not called when a local frame is popped; this is only used
// for explicit single removals.
// Returns "false" if nothing was removed.
bool IndirectReferenceTable::Remove(IRTSegmentState previous_state, IndirectRef iref) {
  if (kDebugIRT) {
    LOG(INFO) << "+++ Remove: previous_state=" << previous_state.top_index
              << " top_index=" << segment_state_.top_index
              << " last_known_prev_top_index=" << last_known_previous_state_.top_index
              << " holes=" << current_num_holes_;
  }

  const uint32_t top_index = segment_state_.top_index;
  const uint32_t bottom_index = previous_state.top_index;

  DCHECK(table_ != nullptr);

  if (GetIndirectRefKind(iref) == kHandleScopeOrInvalid) {
    auto* self = Thread::Current();
    if (self->HandleScopeContains(reinterpret_cast<jobject>(iref))) {
      auto* env = self->GetJniEnv();
      DCHECK(env != nullptr);
      if (env->IsCheckJniEnabled()) {
        ScopedObjectAccess soa(self);
        LOG(WARNING) << "Attempt to remove non-JNI local reference, dumping thread";
        if (kDumpStackOnNonLocalReference) {
          self->Dump(LOG_STREAM(WARNING));
        }
      }
      return true;
    }
  }
  const uint32_t idx = ExtractIndex(iref);
  if (idx < bottom_index) {
    // Wrong segment.
    LOG(WARNING) << "Attempt to remove index outside index area (" << idx
                 << " vs " << bottom_index << "-" << top_index << ")";
    return false;
  }
  if (idx >= top_index) {
    // Bad --- stale reference?
    LOG(WARNING) << "Attempt to remove invalid index " << idx
                 << " (bottom=" << bottom_index << " top=" << top_index << ")";
    return false;
  }

  RecoverHoles(previous_state);
  CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);

  if (idx == top_index - 1) {
    // Top-most entry. Scan up and consume holes.

    if (!CheckEntry("remove", iref, idx)) {
      return false;
    }

    *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
    if (current_num_holes_ != 0) {
      uint32_t collapse_top_index = top_index;
      while (--collapse_top_index > bottom_index && current_num_holes_ != 0) {
        if (kDebugIRT) {
          ScopedObjectAccess soa(Thread::Current());
          LOG(INFO) << "+++ checking for hole at " << collapse_top_index - 1
                    << " (previous_state=" << bottom_index << ") val="
                    << table_[collapse_top_index - 1].GetReference()->Read<kWithoutReadBarrier>();
        }
        if (!table_[collapse_top_index - 1].GetReference()->IsNull()) {
          break;
        }
        if (kDebugIRT) {
          LOG(INFO) << "+++ ate hole at " << (collapse_top_index - 1);
        }
        current_num_holes_--;
      }
      segment_state_.top_index = collapse_top_index;

      CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
    } else {
      segment_state_.top_index = top_index - 1;
      if (kDebugIRT) {
        LOG(INFO) << "+++ ate last entry " << top_index - 1;
      }
    }
  } else {
    // Not the top-most entry. This creates a hole. We null out the entry to prevent somebody
    // from deleting it twice and screwing up the hole count.
    if (table_[idx].GetReference()->IsNull()) {
      LOG(INFO) << "--- WEIRD: removing null entry " << idx;
      return false;
    }
    if (!CheckEntry("remove", iref, idx)) {
      return false;
    }

    *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
    current_num_holes_++;
    CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
    if (kDebugIRT) {
      LOG(INFO) << "+++ left hole at " << idx << ", holes=" << current_num_holes_;
    }
  }

  return true;
}

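// Releases the pages past the current top of the table back to the kernel; entries in use are
// untouched.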
void IndirectReferenceTable::Trim() {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  const size_t top_index = Capacity();
  auto* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table_[top_index]), kPageSize);
  uint8_t* release_end = table_mem_map_.End();
  madvise(release_start, release_end - release_start, MADV_DONTNEED);
}

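// Reports every non-null entry to the GC as a root, batched through a BufferedRootVisitor to
// reduce per-root visitor overhead.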
void IndirectReferenceTable::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
  BufferedRootVisitor<kDefaultBufferedRootCount> root_visitor(visitor, root_info);
  for (auto ref : *this) {
    if (!ref->IsNull()) {
      root_visitor.VisitRoot(*ref);
      DCHECK(!ref->IsNull());
    }
  }
}

void IndirectReferenceTable::Dump(std::ostream& os) const {
  os << kind_ << " table dump:\n";
  ReferenceTable::Table entries;
  for (size_t i = 0; i < Capacity(); ++i) {
    ObjPtr<mirror::Object> obj = table_[i].GetReference()->Read<kWithoutReadBarrier>();
    if (obj != nullptr) {
      obj = table_[i].GetReference()->Read();
      entries.push_back(GcRoot<mirror::Object>(obj));
    }
  }
  ReferenceTable::Dump(os, entries);
}

void IndirectReferenceTable::SetSegmentState(IRTSegmentState new_state) {
  if (kDebugIRT) {
    LOG(INFO) << "Setting segment state: "
              << segment_state_.top_index
              << " -> "
              << new_state.top_index;
  }
  segment_state_ = new_state;
}

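// Best-effort check that at least free_capacity entries are available above the current top
// index, resizing the table if necessary and permitted.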
bool IndirectReferenceTable::EnsureFreeCapacity(size_t free_capacity, std::string* error_msg) {
  size_t top_index = segment_state_.top_index;
  if (top_index < max_entries_ && top_index + free_capacity <= max_entries_) {
    return true;
  }

  // We only make a simple best-effort attempt here, ensuring the asked-for capacity at the end.
  if (resizable_ == ResizableCapacity::kNo) {
    *error_msg = "Table is not resizable";
    return false;
  }

  // Try to increase the table size.

  // Would this overflow?
  if (std::numeric_limits<size_t>::max() - free_capacity < top_index) {
    *error_msg = "Cannot resize table, overflow.";
    return false;
  }

  if (!Resize(top_index + free_capacity, error_msg)) {
    LOG(WARNING) << "JNI ERROR: Unable to reserve space in EnsureFreeCapacity (" << free_capacity
                 << "): " << std::endl
                 << MutatorLockedDumpable<IndirectReferenceTable>(*this)
                 << " Resizing failed: " << *error_msg;
    return false;
  }
  return true;
}

size_t IndirectReferenceTable::FreeCapacity() const {
  return max_entries_ - segment_state_.top_index;
}

}  // namespace art