/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "indirect_reference_table-inl.h"

#include <cstdlib>

#include "base/mutator_locked_dumpable.h"
#include "base/systrace.h"
#include "base/utils.h"
#include "jni/java_vm_ext.h"
#include "jni/jni_internal.h"
#include "mirror/object-inl.h"
#include "nth_caller_visitor.h"
#include "reference_table.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"

namespace art {

static constexpr bool kDumpStackOnNonLocalReference = false;
static constexpr bool kDebugIRT = false;

// Maximum table size we allow.
static constexpr size_t kMaxTableSizeInBytes = 128 * MB;
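// For scale (illustrative, not from this file): if an IrtEntry is 8 bytes
// (the actual size is fixed in indirect_reference_table.h), the 128 MB cap
// corresponds to roughly 16.8 million entries.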

const char* GetIndirectRefKindString(const IndirectRefKind& kind) {
  switch (kind) {
    case kHandleScopeOrInvalid:
      return "HandleScopeOrInvalid";
    case kLocal:
      return "Local";
    case kGlobal:
      return "Global";
    case kWeakGlobal:
      return "WeakGlobal";
  }
  return "IndirectRefKind Error";
}

void IndirectReferenceTable::AbortIfNoCheckJNI(const std::string& msg) {
  // If -Xcheck:jni is on, it'll give a more detailed error before aborting.
  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
  if (!vm->IsCheckJniEnabled()) {
    // Otherwise, we want to abort rather than hand back a bad reference.
    LOG(FATAL) << msg;
  } else {
    LOG(ERROR) << msg;
  }
}

IndirectReferenceTable::IndirectReferenceTable(size_t max_count,
                                               IndirectRefKind desired_kind,
                                               ResizableCapacity resizable,
                                               std::string* error_msg)
    : segment_state_(kIRTFirstSegment),
      kind_(desired_kind),
      max_entries_(max_count),
      current_num_holes_(0),
      resizable_(resizable) {
  CHECK(error_msg != nullptr);
  CHECK_NE(desired_kind, kHandleScopeOrInvalid);

  // Overflow and maximum check.
  CHECK_LE(max_count, kMaxTableSizeInBytes / sizeof(IrtEntry));

  const size_t table_bytes = max_count * sizeof(IrtEntry);
  table_mem_map_ = MemMap::MapAnonymous("indirect ref table",
                                        /* addr */ nullptr,
                                        table_bytes,
                                        PROT_READ | PROT_WRITE,
                                        /* low_4gb */ false,
                                        error_msg);
  if (!table_mem_map_.IsValid() && error_msg->empty()) {
    *error_msg = "Unable to map memory for indirect ref table";
  }

  if (table_mem_map_.IsValid()) {
    table_ = reinterpret_cast<IrtEntry*>(table_mem_map_.Begin());
  } else {
    table_ = nullptr;
  }
  segment_state_ = kIRTFirstSegment;
  last_known_previous_state_ = kIRTFirstSegment;
}

IndirectReferenceTable::~IndirectReferenceTable() {
}

void IndirectReferenceTable::ConstexprChecks() {
  // This method holds assertions that cannot live in the header: C++ requires
  // the class to be complete before these expressions can be checked.

  // Check kind.
  static_assert((EncodeIndirectRefKind(kLocal) & (~kKindMask)) == 0, "Kind encoding error");
  static_assert((EncodeIndirectRefKind(kGlobal) & (~kKindMask)) == 0, "Kind encoding error");
  static_assert((EncodeIndirectRefKind(kWeakGlobal) & (~kKindMask)) == 0, "Kind encoding error");
  static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kLocal)) == kLocal,
                "Kind encoding error");
  static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kGlobal)) == kGlobal,
                "Kind encoding error");
  static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kWeakGlobal)) == kWeakGlobal,
                "Kind encoding error");

  // Check serial.
  static_assert(DecodeSerial(EncodeSerial(0u)) == 0u, "Serial encoding error");
  static_assert(DecodeSerial(EncodeSerial(1u)) == 1u, "Serial encoding error");
  static_assert(DecodeSerial(EncodeSerial(2u)) == 2u, "Serial encoding error");
  static_assert(DecodeSerial(EncodeSerial(3u)) == 3u, "Serial encoding error");

  // Table index.
  static_assert(DecodeIndex(EncodeIndex(0u)) == 0u, "Index encoding error");
  static_assert(DecodeIndex(EncodeIndex(1u)) == 1u, "Index encoding error");
  static_assert(DecodeIndex(EncodeIndex(2u)) == 2u, "Index encoding error");
  static_assert(DecodeIndex(EncodeIndex(3u)) == 3u, "Index encoding error");
}
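// Illustration of the encoding exercised above (the authoritative shifts and
// masks live in the header; treat the exact widths here as an assumption): an
// IndirectRef packs the kind into the low bits covered by kKindMask, a small
// per-slot serial number above it (the checks cover values 0-3, i.e. at least
// two bits), and the table index in the remaining high bits:
//
//   [ table index ... | serial | kind ]
//     high bits               low bits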

bool IndirectReferenceTable::IsValid() const {
  return table_mem_map_.IsValid();
}

// Holes:
//
// To keep the IRT compact, we want to fill "holes" created by non-stack-discipline Add & Remove
// operation sequences. For simplicity and lower memory overhead, we do not use a free list or
// similar. Instead, we scan for holes, with the expectation that we will find holes fast as they
// are usually near the end of the table (see the header, TODO: verify this assumption). To avoid
// scans when there are no holes, the number of known holes should be tracked.
//
// A previous implementation stored the top index and the number of holes as the segment state.
// This constrained the maximum number of references to a 16-bit count. We want to relax this, as
// it is easy to require more references (e.g., to list all classes in large applications). Thus,
// the implicitly stack-stored state, the IRTSegmentState, is only the top index.
//
// The hole count is therefore a local property of the current segment, and needs to be recovered
// when (or after) a frame is pushed or popped. To keep JNI transitions simple (and inlineable),
// we cannot do work when the segment changes. Add and Remove thus need to ensure the current
// hole count is correct.
//
// To be able to detect segment changes, we require an additional local field that can describe
// the known segment. This is last_known_previous_state_. The requirement will become clear with
// the following (some non-trivial) cases that have to be supported:
//
// 1) Segment with holes (current_num_holes_ > 0), push new segment, add/remove reference.
// 2) Segment with holes (current_num_holes_ > 0), pop segment, add/remove reference.
// 3) Segment with holes (current_num_holes_ > 0), push new segment, pop segment, add/remove
//    reference.
// 4) Empty segment, push new segment, create a hole, pop a segment, add/remove a reference.
// 5) Base segment, push new segment, create a hole, pop a segment, push new segment, add/remove
//    reference.
//
// Storing the last known *previous* state (bottom index) allows conservatively detecting all the
// segment changes above. The condition is simply that the last known state is greater than or
// equal to the current previous state, and smaller than the current state (top index). The
// condition is conservative as it adds O(1) overhead to operations on an empty segment.
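//
// Worked example (hypothetical indices): starting from an empty base segment
// (top index 0), push a segment and Add three references (top index 3), then
// Remove the middle one. That leaves a hole, so current_num_holes_ is 1 and
// last_known_previous_state_ records the segment's bottom index 0. Pop the
// segment (top index back to 0) and Add again: the last known state (0) is
// not smaller than the current top index (0), so RecoverHoles() rescans the
// now-empty range and resets current_num_holes_ to 0 before the new
// reference is stored.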

static size_t CountNullEntries(const IrtEntry* table, size_t from, size_t to) {
  size_t count = 0;
  for (size_t index = from; index != to; ++index) {
    if (table[index].GetReference()->IsNull()) {
      count++;
    }
  }
  return count;
}

void IndirectReferenceTable::RecoverHoles(IRTSegmentState prev_state) {
  if (last_known_previous_state_.top_index >= segment_state_.top_index ||
      last_known_previous_state_.top_index < prev_state.top_index) {
    const size_t top_index = segment_state_.top_index;
    size_t count = CountNullEntries(table_, prev_state.top_index, top_index);

    if (kDebugIRT) {
      LOG(INFO) << "+++ Recovered holes: "
                << " Current prev=" << prev_state.top_index
                << " Current top_index=" << top_index
                << " Old num_holes=" << current_num_holes_
                << " New num_holes=" << count;
    }

    current_num_holes_ = count;
    last_known_previous_state_ = prev_state;
  } else if (kDebugIRT) {
    LOG(INFO) << "No need to recover holes";
  }
}

ALWAYS_INLINE
static inline void CheckHoleCount(IrtEntry* table,
                                  size_t exp_num_holes,
                                  IRTSegmentState prev_state,
                                  IRTSegmentState cur_state) {
  if (kIsDebugBuild) {
    size_t count = CountNullEntries(table, prev_state.top_index, cur_state.top_index);
    CHECK_EQ(exp_num_holes, count) << "prevState=" << prev_state.top_index
                                   << " topIndex=" << cur_state.top_index;
  }
}

bool IndirectReferenceTable::Resize(size_t new_size, std::string* error_msg) {
  CHECK_GT(new_size, max_entries_);

  constexpr size_t kMaxEntries = kMaxTableSizeInBytes / sizeof(IrtEntry);
  if (new_size > kMaxEntries) {
    *error_msg = android::base::StringPrintf("Requested size exceeds maximum: %zu", new_size);
    return false;
  }
  // Note: the above check also ensures that there is no overflow below.

  const size_t table_bytes = new_size * sizeof(IrtEntry);
  MemMap new_map = MemMap::MapAnonymous("indirect ref table",
                                        /* addr */ nullptr,
                                        table_bytes,
                                        PROT_READ | PROT_WRITE,
                                        /* low_4gb */ false,
                                        error_msg);
  if (!new_map.IsValid()) {
    return false;
  }

  memcpy(new_map.Begin(), table_mem_map_.Begin(), table_mem_map_.Size());
  table_mem_map_ = std::move(new_map);
  table_ = reinterpret_cast<IrtEntry*>(table_mem_map_.Begin());
  max_entries_ = new_size;

  return true;
}

IndirectRef IndirectReferenceTable::Add(IRTSegmentState previous_state,
                                        ObjPtr<mirror::Object> obj,
                                        std::string* error_msg) {
  if (kDebugIRT) {
    LOG(INFO) << "+++ Add: previous_state=" << previous_state.top_index
              << " top_index=" << segment_state_.top_index
              << " last_known_prev_top_index=" << last_known_previous_state_.top_index
              << " holes=" << current_num_holes_;
  }

  size_t top_index = segment_state_.top_index;

  CHECK(obj != nullptr);
  VerifyObject(obj);
  DCHECK(table_ != nullptr);

  if (top_index == max_entries_) {
    if (resizable_ == ResizableCapacity::kNo) {
      std::ostringstream oss;
      oss << "JNI ERROR (app bug): " << kind_ << " table overflow "
          << "(max=" << max_entries_ << ")"
          << MutatorLockedDumpable<IndirectReferenceTable>(*this);
      *error_msg = oss.str();
      return nullptr;
    }

    // Try to double the space.
    if (std::numeric_limits<size_t>::max() / 2 < max_entries_) {
      std::ostringstream oss;
      oss << "JNI ERROR (app bug): " << kind_ << " table overflow "
          << "(max=" << max_entries_ << ")" << std::endl
          << MutatorLockedDumpable<IndirectReferenceTable>(*this)
          << " Resizing failed: exceeds size_t";
      *error_msg = oss.str();
      return nullptr;
    }

    std::string inner_error_msg;
    if (!Resize(max_entries_ * 2, &inner_error_msg)) {
      std::ostringstream oss;
      oss << "JNI ERROR (app bug): " << kind_ << " table overflow "
          << "(max=" << max_entries_ << ")" << std::endl
          << MutatorLockedDumpable<IndirectReferenceTable>(*this)
          << " Resizing failed: " << inner_error_msg;
      *error_msg = oss.str();
      return nullptr;
    }
  }

  RecoverHoles(previous_state);
  CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);

  // We know there's enough room in the table. Now we just need to find
  // the right spot. If there's a hole, find it and fill it; otherwise,
  // add to the end of the list.
  IndirectRef result;
  size_t index;
  if (current_num_holes_ > 0) {
    DCHECK_GT(top_index, 1U);
    // Find the first hole; likely to be near the end of the list.
    IrtEntry* p_scan = &table_[top_index - 1];
    DCHECK(!p_scan->GetReference()->IsNull());
    --p_scan;
    while (!p_scan->GetReference()->IsNull()) {
      DCHECK_GE(p_scan, table_ + previous_state.top_index);
      --p_scan;
    }
    index = p_scan - table_;
    current_num_holes_--;
  } else {
    // Add to the end.
    index = top_index++;
    segment_state_.top_index = top_index;
  }
  table_[index].Add(obj);
  result = ToIndirectRef(index);
  if (kDebugIRT) {
    LOG(INFO) << "+++ added at " << ExtractIndex(result) << " top=" << segment_state_.top_index
              << " holes=" << current_num_holes_;
  }

  DCHECK(result != nullptr);
  return result;
}

void IndirectReferenceTable::AssertEmpty() {
  for (size_t i = 0; i < Capacity(); ++i) {
    if (!table_[i].GetReference()->IsNull()) {
      LOG(FATAL) << "Internal Error: non-empty local reference table\n"
                 << MutatorLockedDumpable<IndirectReferenceTable>(*this);
      UNREACHABLE();
    }
  }
}

// Removes an object. We extract the table offset bits from "iref"
// and zap the corresponding entry, leaving a hole if it's not at the top.
// If the entry is not between the current top index and the bottom index
// specified by the cookie, we don't remove anything. This is the behavior
// required by JNI's DeleteLocalRef function.
// This method is not called when a local frame is popped; this is only used
// for explicit single removals.
// Returns "false" if nothing was removed.
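// For example (illustrative JNI sequence, not code from this file):
//
//   jobject a = env->NewLocalRef(x);  // stored in slot i
//   jobject b = env->NewLocalRef(y);  // stored in slot i + 1
//   env->DeleteLocalRef(a);           // not the top: leaves a hole at slot i
//   env->DeleteLocalRef(b);           // top entry: removed, and the scan
//                                     // below then consumes the hole at i,
//                                     // shrinking the segment by two slots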
bool IndirectReferenceTable::Remove(IRTSegmentState previous_state, IndirectRef iref) {
  if (kDebugIRT) {
    LOG(INFO) << "+++ Remove: previous_state=" << previous_state.top_index
              << " top_index=" << segment_state_.top_index
              << " last_known_prev_top_index=" << last_known_previous_state_.top_index
              << " holes=" << current_num_holes_;
  }

  const uint32_t top_index = segment_state_.top_index;
  const uint32_t bottom_index = previous_state.top_index;

  DCHECK(table_ != nullptr);

  if (GetIndirectRefKind(iref) == kHandleScopeOrInvalid) {
    auto* self = Thread::Current();
    if (self->HandleScopeContains(reinterpret_cast<jobject>(iref))) {
      auto* env = self->GetJniEnv();
      DCHECK(env != nullptr);
      if (env->IsCheckJniEnabled()) {
        ScopedObjectAccess soa(self);
        LOG(WARNING) << "Attempt to remove non-JNI local reference, dumping thread";
        if (kDumpStackOnNonLocalReference) {
          self->Dump(LOG_STREAM(WARNING));
        }
      }
      return true;
    }
  }
  const uint32_t idx = ExtractIndex(iref);
  if (idx < bottom_index) {
    // Wrong segment.
    LOG(WARNING) << "Attempt to remove index outside index area (" << idx
                 << " vs " << bottom_index << "-" << top_index << ")";
    return false;
  }
  if (idx >= top_index) {
    // Bad --- stale reference?
    LOG(WARNING) << "Attempt to remove invalid index " << idx
                 << " (bottom=" << bottom_index << " top=" << top_index << ")";
    return false;
  }

  RecoverHoles(previous_state);
  CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);

  if (idx == top_index - 1) {
    // Top-most entry. Scan down from the top and consume holes.

    if (!CheckEntry("remove", iref, idx)) {
      return false;
    }

    *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
    if (current_num_holes_ != 0) {
      uint32_t collapse_top_index = top_index;
      while (--collapse_top_index > bottom_index && current_num_holes_ != 0) {
        if (kDebugIRT) {
          ScopedObjectAccess soa(Thread::Current());
          LOG(INFO) << "+++ checking for hole at " << collapse_top_index - 1
                    << " (previous_state=" << bottom_index << ") val="
                    << table_[collapse_top_index - 1].GetReference()->Read<kWithoutReadBarrier>();
        }
        if (!table_[collapse_top_index - 1].GetReference()->IsNull()) {
          break;
        }
        if (kDebugIRT) {
          LOG(INFO) << "+++ ate hole at " << (collapse_top_index - 1);
        }
        current_num_holes_--;
      }
      segment_state_.top_index = collapse_top_index;

      CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
    } else {
      segment_state_.top_index = top_index - 1;
      if (kDebugIRT) {
        LOG(INFO) << "+++ ate last entry " << top_index - 1;
      }
    }
  } else {
    // Not the top-most entry. This creates a hole. We null out the entry to prevent somebody
    // from deleting it twice and screwing up the hole count.
    if (table_[idx].GetReference()->IsNull()) {
      LOG(INFO) << "--- WEIRD: removing null entry " << idx;
      return false;
    }
    if (!CheckEntry("remove", iref, idx)) {
      return false;
    }

    *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
    current_num_holes_++;
    CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
    if (kDebugIRT) {
      LOG(INFO) << "+++ left hole at " << idx << ", holes=" << current_num_holes_;
    }
  }

  return true;
}

void IndirectReferenceTable::Trim() {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  const size_t top_index = Capacity();
  auto* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table_[top_index]), kPageSize);
  uint8_t* release_end = table_mem_map_.End();
  // Return the pages backing the unused tail of the table to the kernel.
  madvise(release_start, release_end - release_start, MADV_DONTNEED);
}

void IndirectReferenceTable::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
  BufferedRootVisitor<kDefaultBufferedRootCount> root_visitor(visitor, root_info);
  for (auto ref : *this) {
    if (!ref->IsNull()) {
      root_visitor.VisitRoot(*ref);
      DCHECK(!ref->IsNull());
    }
  }
}

void IndirectReferenceTable::Dump(std::ostream& os) const {
  os << kind_ << " table dump:\n";
  ReferenceTable::Table entries;
  for (size_t i = 0; i < Capacity(); ++i) {
    ObjPtr<mirror::Object> obj = table_[i].GetReference()->Read<kWithoutReadBarrier>();
    if (obj != nullptr) {
      obj = table_[i].GetReference()->Read();
      entries.push_back(GcRoot<mirror::Object>(obj));
    }
  }
  ReferenceTable::Dump(os, entries);
}

void IndirectReferenceTable::SetSegmentState(IRTSegmentState new_state) {
  if (kDebugIRT) {
    LOG(INFO) << "Setting segment state: "
              << segment_state_.top_index
              << " -> "
              << new_state.top_index;
  }
  segment_state_ = new_state;
}

bool IndirectReferenceTable::EnsureFreeCapacity(size_t free_capacity, std::string* error_msg) {
  size_t top_index = segment_state_.top_index;
  if (top_index < max_entries_ && top_index + free_capacity <= max_entries_) {
    return true;
  }

  // We only make a simple best-effort attempt here, ensuring the asked-for capacity at the end.
  if (resizable_ == ResizableCapacity::kNo) {
    *error_msg = "Table is not resizable";
    return false;
  }

  // Try to increase the table size.

  // Would this overflow?
  if (std::numeric_limits<size_t>::max() - free_capacity < top_index) {
    *error_msg = "Cannot resize table, overflow.";
    return false;
  }

  if (!Resize(top_index + free_capacity, error_msg)) {
    LOG(WARNING) << "JNI ERROR: Unable to reserve space in EnsureFreeCapacity (" << free_capacity
                 << "): " << std::endl
                 << MutatorLockedDumpable<IndirectReferenceTable>(*this)
                 << " Resizing failed: " << *error_msg;
    return false;
  }
  return true;
}

size_t IndirectReferenceTable::FreeCapacity() const {
  return max_entries_ - segment_state_.top_index;
}

}  // namespace art