blob: 3f728cbbfee5188b57c5f84f1b203bc597d06b0d [file] [log] [blame]
Elliott Hughes2faa5f12012-01-30 14:42:07 -08001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
Brian Carlstrom7e93b502011-08-04 14:16:22 -070016
17#include "intern_table.h"
18
Ian Rogers700a4022014-05-19 16:49:03 -070019#include <memory>
20
David Sehr0225f8e2018-01-31 08:52:24 +000021#include "dex/utf.h"
Mathieu Chartier97509952015-07-13 14:35:43 -070022#include "gc/collector/garbage_collector.h"
Ian Rogers7dfb28c2013-08-22 08:18:36 -070023#include "gc/space/image_space.h"
Mathieu Chartier14c3bf92015-07-13 14:35:43 -070024#include "gc/weak_root_state.h"
Andreas Gampe8cf9cb32017-07-19 09:28:38 -070025#include "gc_root-inl.h"
Andreas Gampee15b9b12018-10-29 12:54:27 -070026#include "handle_scope-inl.h"
Mathieu Chartier4a26f172016-01-26 14:26:18 -080027#include "image-inl.h"
Vladimir Marko05792b92015-08-03 11:56:49 +010028#include "mirror/dex_cache-inl.h"
Ian Rogers7dfb28c2013-08-22 08:18:36 -070029#include "mirror/object-inl.h"
Andreas Gampe8cf9cb32017-07-19 09:28:38 -070030#include "mirror/object_array-inl.h"
Mathieu Chartiercdfd39f2014-08-29 18:16:58 -070031#include "mirror/string-inl.h"
Andreas Gampe5d08fcc2017-06-05 17:56:46 -070032#include "object_callbacks.h"
Andreas Gampe508fdf32017-06-05 16:42:13 -070033#include "scoped_thread_state_change-inl.h"
Ian Rogers2dd0e2c2013-01-24 12:42:14 -080034#include "thread.h"
Brian Carlstrom7e93b502011-08-04 14:16:22 -070035
36namespace art {
37
// Constructs an empty intern table. Root logging starts disabled; the
// condition variable is used to block interning threads while GC weak-root
// access is disallowed; the weak-root state starts in the normal
// (reads-and-writes-allowed) state.
InternTable::InternTable()
    : log_new_roots_(false),
      weak_intern_condition_("New intern condition", *Locks::intern_table_lock_),
      weak_root_state_(gc::kWeakRootStateNormal) {
}
Elliott Hughesde69d7f2011-08-18 16:49:37 -070043
Brian Carlstroma663ea52011-08-19 23:33:41 -070044size_t InternTable::Size() const {
Sebastien Hertzd2fe10a2014-01-15 10:20:56 +010045 MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
Mathieu Chartiereb175f72014-10-31 11:49:27 -070046 return strong_interns_.Size() + weak_interns_.Size();
Brian Carlstroma663ea52011-08-19 23:33:41 -070047}
48
Hiroshi Yamauchia91a4bc2014-06-13 16:44:55 -070049size_t InternTable::StrongSize() const {
50 MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
Mathieu Chartiereb175f72014-10-31 11:49:27 -070051 return strong_interns_.Size();
Hiroshi Yamauchia91a4bc2014-06-13 16:44:55 -070052}
53
54size_t InternTable::WeakSize() const {
55 MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
Mathieu Chartiereb175f72014-10-31 11:49:27 -070056 return weak_interns_.Size();
Hiroshi Yamauchia91a4bc2014-06-13 16:44:55 -070057}
58
Elliott Hughescac6cc72011-11-03 20:31:21 -070059void InternTable::DumpForSigQuit(std::ostream& os) const {
Mathieu Chartiereb175f72014-10-31 11:49:27 -070060 os << "Intern table: " << StrongSize() << " strong; " << WeakSize() << " weak\n";
Elliott Hughescac6cc72011-11-03 20:31:21 -070061}
62
// Visits strong intern roots according to |flags|. Weak interns and the
// immutable image roots are deliberately not visited here (see note at the
// bottom). When only new roots are requested, entries recorded in
// new_strong_intern_roots_ are visited and any root moved by the GC is
// re-inserted so the set stays consistent.
void InternTable::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  if ((flags & kVisitRootFlagAllRoots) != 0) {
    strong_interns_.VisitRoots(visitor);
  } else if ((flags & kVisitRootFlagNewRoots) != 0) {
    for (auto& root : new_strong_intern_roots_) {
      // Read before and after visiting so a relocation by the visitor can be
      // detected without read barriers.
      ObjPtr<mirror::String> old_ref = root.Read<kWithoutReadBarrier>();
      root.VisitRoot(visitor, RootInfo(kRootInternedString));
      ObjPtr<mirror::String> new_ref = root.Read<kWithoutReadBarrier>();
      if (new_ref != old_ref) {
        // The GC moved a root in the log. Need to search the strong interns and update the
        // corresponding object. This is slow, but luckily for us, this may only happen with a
        // concurrent moving GC.
        strong_interns_.Remove(old_ref);
        strong_interns_.Insert(new_ref);
      }
    }
  }
  if ((flags & kVisitRootFlagClearRootLog) != 0) {
    new_strong_intern_roots_.clear();
  }
  if ((flags & kVisitRootFlagStartLoggingNewRoots) != 0) {
    log_new_roots_ = true;
  } else if ((flags & kVisitRootFlagStopLoggingNewRoots) != 0) {
    log_new_roots_ = false;
  }
  // Note: we deliberately don't visit the weak_interns_ table and the immutable image roots.
}
91
Mathieu Chartier9e868092016-10-31 14:58:04 -070092ObjPtr<mirror::String> InternTable::LookupWeak(Thread* self, ObjPtr<mirror::String> s) {
Mathieu Chartierfbc31082016-01-24 11:59:56 -080093 MutexLock mu(self, *Locks::intern_table_lock_);
94 return LookupWeakLocked(s);
Mathieu Chartierf7fd9702015-11-09 11:16:49 -080095}
96
Mathieu Chartier9e868092016-10-31 14:58:04 -070097ObjPtr<mirror::String> InternTable::LookupStrong(Thread* self, ObjPtr<mirror::String> s) {
Mathieu Chartierfbc31082016-01-24 11:59:56 -080098 MutexLock mu(self, *Locks::intern_table_lock_);
99 return LookupStrongLocked(s);
100}
101
Mathieu Chartier9e868092016-10-31 14:58:04 -0700102ObjPtr<mirror::String> InternTable::LookupStrong(Thread* self,
Vladimir Markocac5a7e2016-02-22 10:39:50 +0000103 uint32_t utf16_length,
104 const char* utf8_data) {
105 DCHECK_EQ(utf16_length, CountModifiedUtf8Chars(utf8_data));
106 Utf8String string(utf16_length,
107 utf8_data,
108 ComputeUtf16HashFromModifiedUtf8(utf8_data, utf16_length));
109 MutexLock mu(self, *Locks::intern_table_lock_);
110 return strong_interns_.Find(string);
111}
112
// Returns the weakly-interned string equal to |s|, or null. Caller must hold
// the intern table lock.
ObjPtr<mirror::String> InternTable::LookupWeakLocked(ObjPtr<mirror::String> s) {
  return weak_interns_.Find(s);
}

// Returns the strongly-interned string equal to |s|, or null. Caller must
// hold the intern table lock.
ObjPtr<mirror::String> InternTable::LookupStrongLocked(ObjPtr<mirror::String> s) {
  return strong_interns_.Find(s);
}
120
Mathieu Chartierea0831f2015-12-29 13:17:37 -0800121void InternTable::AddNewTable() {
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700122 MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
Mathieu Chartierea0831f2015-12-29 13:17:37 -0800123 weak_interns_.AddNewTable();
124 strong_interns_.AddNewTable();
Elliott Hughescf4c6c42011-09-01 15:16:42 -0700125}
126
Mathieu Chartier9e868092016-10-31 14:58:04 -0700127ObjPtr<mirror::String> InternTable::InsertStrong(ObjPtr<mirror::String> s) {
Sebastien Hertzd2fe10a2014-01-15 10:20:56 +0100128 Runtime* runtime = Runtime::Current();
129 if (runtime->IsActiveTransaction()) {
Mathieu Chartiercdfd39f2014-08-29 18:16:58 -0700130 runtime->RecordStrongStringInsertion(s);
Sebastien Hertzd2fe10a2014-01-15 10:20:56 +0100131 }
Mathieu Chartier893263b2014-03-04 11:07:42 -0800132 if (log_new_roots_) {
Mathieu Chartiercdfd39f2014-08-29 18:16:58 -0700133 new_strong_intern_roots_.push_back(GcRoot<mirror::String>(s));
Mathieu Chartier893263b2014-03-04 11:07:42 -0800134 }
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700135 strong_interns_.Insert(s);
Mathieu Chartier893263b2014-03-04 11:07:42 -0800136 return s;
Sebastien Hertzd2fe10a2014-01-15 10:20:56 +0100137}
138
Mathieu Chartier9e868092016-10-31 14:58:04 -0700139ObjPtr<mirror::String> InternTable::InsertWeak(ObjPtr<mirror::String> s) {
Sebastien Hertzd2fe10a2014-01-15 10:20:56 +0100140 Runtime* runtime = Runtime::Current();
141 if (runtime->IsActiveTransaction()) {
Mathieu Chartiercdfd39f2014-08-29 18:16:58 -0700142 runtime->RecordWeakStringInsertion(s);
Sebastien Hertzd2fe10a2014-01-15 10:20:56 +0100143 }
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700144 weak_interns_.Insert(s);
Elliott Hughescf4c6c42011-09-01 15:16:42 -0700145 return s;
146}
147
// Removes |s| from the strong table. Note: unlike RemoveWeak(), this does not
// record the removal for transaction rollback.
void InternTable::RemoveStrong(ObjPtr<mirror::String> s) {
  strong_interns_.Remove(s);
}

// Removes |s| from the weak table, recording the removal for rollback when a
// transaction is active.
void InternTable::RemoveWeak(ObjPtr<mirror::String> s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordWeakStringRemoval(s);
  }
  weak_interns_.Remove(s);
}
159
// Insert/remove methods used to undo changes made during an aborted transaction.
// They run during rollback, so no transaction may be active at that point.
ObjPtr<mirror::String> InternTable::InsertStrongFromTransaction(ObjPtr<mirror::String> s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  return InsertStrong(s);
}

ObjPtr<mirror::String> InternTable::InsertWeakFromTransaction(ObjPtr<mirror::String> s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  return InsertWeak(s);
}

void InternTable::RemoveStrongFromTransaction(ObjPtr<mirror::String> s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  RemoveStrong(s);
}

void InternTable::RemoveWeakFromTransaction(ObjPtr<mirror::String> s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  RemoveWeak(s);
}
180
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700181void InternTable::BroadcastForNewInterns() {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700182 Thread* self = Thread::Current();
183 MutexLock mu(self, *Locks::intern_table_lock_);
Mathieu Chartier14c3bf92015-07-13 14:35:43 -0700184 weak_intern_condition_.Broadcast(self);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700185}
186
// Blocks until weak intern access is re-enabled after GC weak-root
// processing. Called with the intern table lock held exclusively; the lock is
// released while waiting and re-acquired before returning.
void InternTable::WaitUntilAccessible(Thread* self) {
  Locks::intern_table_lock_->ExclusiveUnlock(self);
  {
    // Enter a suspended thread state while blocked so the GC is not held up.
    ScopedThreadSuspension sts(self, kWaitingWeakGcRootRead);
    MutexLock mu(self, *Locks::intern_table_lock_);
    // Non-CC: wait for weak_root_state_ to leave the no-reads-or-writes state.
    // CC (read barriers): wait for this thread's weak-ref access to be enabled.
    while ((!kUseReadBarrier && weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) ||
           (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
      weak_intern_condition_.Wait(self);
    }
  }
  Locks::intern_table_lock_->ExclusiveLock(self);
}
199
// Core interning routine: returns the canonical String equal to |s|,
// inserting |s| into the strong or weak table when no match exists. A weak
// match requested strongly is promoted. Unless |holding_locks| is set, this
// may block while the GC has disabled weak-root access.
ObjPtr<mirror::String> InternTable::Insert(ObjPtr<mirror::String> s,
                                           bool is_strong,
                                           bool holding_locks) {
  if (s == nullptr) {
    return nullptr;
  }
  Thread* const self = Thread::Current();
  MutexLock mu(self, *Locks::intern_table_lock_);
  if (kDebugLocking && !holding_locks) {
    // Blocking in WaitUntilAccessible() below is only safe if no locks other
    // than the mutator lock (and the intern table lock taken above) are held.
    Locks::mutator_lock_->AssertSharedHeld(self);
    CHECK_EQ(2u, self->NumberOfHeldMutexes()) << "may only safely hold the mutator lock";
  }
  while (true) {
    if (holding_locks) {
      // A caller holding other locks must not need to wait; weak-root access
      // has to already be enabled.
      if (!kUseReadBarrier) {
        CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
      } else {
        CHECK(self->GetWeakRefAccessEnabled());
      }
    }
    // Check the strong table for a match.
    ObjPtr<mirror::String> strong = LookupStrongLocked(s);
    if (strong != nullptr) {
      return strong;
    }
    if ((!kUseReadBarrier && weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) ||
        (kUseReadBarrier && self->GetWeakRefAccessEnabled())) {
      break;
    }
    // weak_root_state_ is set to gc::kWeakRootStateNoReadsOrWrites in the GC pause but is only
    // cleared after SweepSystemWeaks has completed. This is why we need to wait until it is
    // cleared.
    CHECK(!holding_locks);
    // Wrap |s| in a handle so a moving GC during the wait updates it.
    StackHandleScope<1> hs(self);
    auto h = hs.NewHandleWrapper(&s);
    WaitUntilAccessible(self);
  }
  if (!kUseReadBarrier) {
    CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
  } else {
    CHECK(self->GetWeakRefAccessEnabled());
  }
  // There is no match in the strong table, check the weak table.
  ObjPtr<mirror::String> weak = LookupWeakLocked(s);
  if (weak != nullptr) {
    if (is_strong) {
      // A match was found in the weak table. Promote to the strong table.
      RemoveWeak(weak);
      return InsertStrong(weak);
    }
    return weak;
  }
  // No match in the strong table or the weak table. Insert into the strong / weak table.
  return is_strong ? InsertStrong(s) : InsertWeak(s);
}
255
Mathieu Chartier9e868092016-10-31 14:58:04 -0700256ObjPtr<mirror::String> InternTable::InternStrong(int32_t utf16_length, const char* utf8_data) {
Mathieu Chartiered0fc1d2014-03-21 14:09:35 -0700257 DCHECK(utf8_data != nullptr);
Vladimir Markod2bdb9b2016-07-08 17:23:22 +0100258 Thread* self = Thread::Current();
259 // Try to avoid allocation.
Mathieu Chartier9e868092016-10-31 14:58:04 -0700260 ObjPtr<mirror::String> s = LookupStrong(self, utf16_length, utf8_data);
Vladimir Markod2bdb9b2016-07-08 17:23:22 +0100261 if (s != nullptr) {
262 return s;
263 }
Ian Rogers7dfb28c2013-08-22 08:18:36 -0700264 return InternStrong(mirror::String::AllocFromModifiedUtf8(
Vladimir Markod2bdb9b2016-07-08 17:23:22 +0100265 self, utf16_length, utf8_data));
Elliott Hughescf4c6c42011-09-01 15:16:42 -0700266}
267
// Strong-interns a null-terminated modified-UTF-8 string, allocating a
// managed String for it.
ObjPtr<mirror::String> InternTable::InternStrong(const char* utf8_data) {
  DCHECK(utf8_data != nullptr);
  return InternStrong(mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf8_data));
}

// Strong-interns a string originating from an image; passes holding_locks
// because the caller may hold extra locks (see comment).
ObjPtr<mirror::String> InternTable::InternStrongImageString(ObjPtr<mirror::String> s) {
  // May be holding the heap bitmap lock.
  return Insert(s, true, true);
}
277
// Moves every weak intern into the strong table and clears the weak set.
// Expects exactly one weak backing table (no extra tables appended).
void InternTable::PromoteWeakToStrong() {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  DCHECK_EQ(weak_interns_.tables_.size(), 1u);
  for (GcRoot<mirror::String>& entry : weak_interns_.tables_.front().set_) {
    // No string may be in both tables at once.
    DCHECK(LookupStrongLocked(entry.Read()) == nullptr);
    InsertStrong(entry.Read());
  }
  weak_interns_.tables_.front().set_.clear();
}
287
// Interns |s| with a strong reference.
ObjPtr<mirror::String> InternTable::InternStrong(ObjPtr<mirror::String> s) {
  return Insert(s, true, false);
}

// Interns |s| with a weak reference (subject to sweeping).
ObjPtr<mirror::String> InternTable::InternWeak(ObjPtr<mirror::String> s) {
  return Insert(s, false, false);
}

// Returns true if |s| itself is the weakly-interned instance for its contents.
bool InternTable::ContainsWeak(ObjPtr<mirror::String> s) {
  return LookupWeak(Thread::Current(), s) == s;
}
299
Mathieu Chartier97509952015-07-13 14:35:43 -0700300void InternTable::SweepInternTableWeaks(IsMarkedVisitor* visitor) {
Sebastien Hertzd2fe10a2014-01-15 10:20:56 +0100301 MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
Mathieu Chartier97509952015-07-13 14:35:43 -0700302 weak_interns_.SweepWeaks(visitor);
Brian Carlstroma663ea52011-08-19 23:33:41 -0700303}
304
Mathieu Chartierd39645e2015-06-09 17:50:29 -0700305size_t InternTable::WriteToMemory(uint8_t* ptr) {
306 MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
Mathieu Chartierea0831f2015-12-29 13:17:37 -0800307 return strong_interns_.WriteToMemory(ptr);
Mathieu Chartierd39645e2015-06-09 17:50:29 -0700308}
309
Mathieu Chartierc2e20622014-11-03 11:41:47 -0800310std::size_t InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& root) const {
Mathieu Chartiercdfd39f2014-08-29 18:16:58 -0700311 if (kIsDebugBuild) {
312 Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
313 }
Alexey Grebenkin21f23642016-12-02 17:44:54 +0300314 // An additional cast to prevent undesired sign extension.
315 return static_cast<size_t>(
316 static_cast<uint32_t>(root.Read<kWithoutReadBarrier>()->GetHashCode()));
Mathieu Chartiercdfd39f2014-08-29 18:16:58 -0700317}
318
319bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
Mathieu Chartierc2e20622014-11-03 11:41:47 -0800320 const GcRoot<mirror::String>& b) const {
Mathieu Chartiercdfd39f2014-08-29 18:16:58 -0700321 if (kIsDebugBuild) {
322 Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
323 }
Mathieu Chartier9e868092016-10-31 14:58:04 -0700324 return a.Read<kWithoutReadBarrier>()->Equals(b.Read<kWithoutReadBarrier>());
Mathieu Chartiercdfd39f2014-08-29 18:16:58 -0700325}
326
// Equality functor comparing an interned String against a raw modified-UTF-8
// key, handling both compressed (8-bit) and uncompressed (16-bit) strings.
bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
                                               const Utf8String& b) const {
  if (kIsDebugBuild) {
    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  }
  ObjPtr<mirror::String> a_string = a.Read<kWithoutReadBarrier>();
  uint32_t a_length = static_cast<uint32_t>(a_string->GetLength());
  // Lengths are in UTF-16 code units; a mismatch rules out equality cheaply.
  if (a_length != b.GetUtf16Length()) {
    return false;
  }
  if (a_string->IsCompressed()) {
    size_t b_byte_count = strlen(b.GetUtf8Data());
    size_t b_utf8_length = CountModifiedUtf8Chars(b.GetUtf8Data(), b_byte_count);
    // Modified UTF-8 single byte character range is 0x01 .. 0x7f
    // The string compression occurs on regular ASCII with same exact range,
    // not on extended ASCII which up to 0xff
    const bool is_b_regular_ascii = (b_byte_count == b_utf8_length);
    if (is_b_regular_ascii) {
      // Both sides are plain ASCII bytes; compare directly.
      return memcmp(b.GetUtf8Data(),
                    a_string->GetValueCompressed(), a_length * sizeof(uint8_t)) == 0;
    } else {
      // |b| contains non-ASCII; a compressed string can never match it.
      return false;
    }
  } else {
    const uint16_t* a_value = a_string->GetValue();
    return CompareModifiedUtf8ToUtf16AsCodePointValues(b.GetUtf8Data(), a_value, a_length) == 0;
  }
}
355
// Serializes this table's entries into |ptr|, returning the number of bytes
// written (semantics of a null |ptr| depend on UnorderedSet::WriteToMemory —
// presumably a size query; confirm against its definition).
size_t InternTable::Table::WriteToMemory(uint8_t* ptr) {
  if (tables_.empty()) {
    return 0;
  }
  UnorderedSet* table_to_write;
  UnorderedSet combined;
  if (tables_.size() > 1) {
    // Multiple backing tables: flatten everything into one combined set so a
    // single table is written out.
    table_to_write = &combined;
    for (InternalTable& table : tables_) {
      for (GcRoot<mirror::String>& string : table.set_) {
        combined.insert(string);
      }
    }
  } else {
    table_to_write = &tables_.back().set_;
  }
  return table_to_write->WriteToMemory(ptr);
}
374
Mathieu Chartier9e868092016-10-31 14:58:04 -0700375void InternTable::Table::Remove(ObjPtr<mirror::String> s) {
Mathieu Chartier8cc418e2018-10-31 10:54:30 -0700376 for (InternalTable& table : tables_) {
377 auto it = table.set_.find(GcRoot<mirror::String>(s));
378 if (it != table.set_.end()) {
379 table.set_.erase(it);
Mathieu Chartierea0831f2015-12-29 13:17:37 -0800380 return;
381 }
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700382 }
Mathieu Chartierea0831f2015-12-29 13:17:37 -0800383 LOG(FATAL) << "Attempting to remove non-interned string " << s->ToModifiedUtf8();
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700384}
385
Mathieu Chartier9e868092016-10-31 14:58:04 -0700386ObjPtr<mirror::String> InternTable::Table::Find(ObjPtr<mirror::String> s) {
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700387 Locks::intern_table_lock_->AssertHeld(Thread::Current());
Mathieu Chartier8cc418e2018-10-31 10:54:30 -0700388 for (InternalTable& table : tables_) {
389 auto it = table.set_.find(GcRoot<mirror::String>(s));
390 if (it != table.set_.end()) {
Mathieu Chartierea0831f2015-12-29 13:17:37 -0800391 return it->Read();
392 }
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700393 }
394 return nullptr;
395}
396
Mathieu Chartier9e868092016-10-31 14:58:04 -0700397ObjPtr<mirror::String> InternTable::Table::Find(const Utf8String& string) {
Vladimir Markocac5a7e2016-02-22 10:39:50 +0000398 Locks::intern_table_lock_->AssertHeld(Thread::Current());
Mathieu Chartier8cc418e2018-10-31 10:54:30 -0700399 for (InternalTable& table : tables_) {
400 auto it = table.set_.find(string);
401 if (it != table.set_.end()) {
Vladimir Markocac5a7e2016-02-22 10:39:50 +0000402 return it->Read();
403 }
404 }
405 return nullptr;
406}
407
Mathieu Chartierea0831f2015-12-29 13:17:37 -0800408void InternTable::Table::AddNewTable() {
Mathieu Chartier8cc418e2018-10-31 10:54:30 -0700409 tables_.push_back(InternalTable());
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700410}
411
// Inserts |s| into the most recently added backing table.
void InternTable::Table::Insert(ObjPtr<mirror::String> s) {
  // Always insert the last table, the image tables are before and we avoid inserting into these
  // to prevent dirty pages.
  DCHECK(!tables_.empty());
  tables_.back().set_.insert(GcRoot<mirror::String>(s));
}
418
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -0700419void InternTable::Table::VisitRoots(RootVisitor* visitor) {
Mathieu Chartier4809d0a2015-04-07 10:39:04 -0700420 BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
421 visitor, RootInfo(kRootInternedString));
Mathieu Chartier8cc418e2018-10-31 10:54:30 -0700422 for (InternalTable& table : tables_) {
423 for (auto& intern : table.set_) {
Mathieu Chartierea0831f2015-12-29 13:17:37 -0800424 buffered_visitor.VisitRoot(intern);
425 }
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700426 }
427}
428
// Sweeps all backing tables, removing dead entries.
void InternTable::Table::SweepWeaks(IsMarkedVisitor* visitor) {
  for (InternalTable& table : tables_) {
    SweepWeaks(&table.set_, visitor);
  }
}

// Sweeps one set: erases entries whose referents are dead, and rewrites
// entries whose referents were moved by the GC.
void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor) {
  for (auto it = set->begin(), end = set->end(); it != end;) {
    // This does not need a read barrier because this is called by GC.
    mirror::Object* object = it->Read<kWithoutReadBarrier>();
    mirror::Object* new_object = visitor->IsMarked(object);
    if (new_object == nullptr) {
      // Dead referent: drop the entry; erase returns the next iterator.
      it = set->erase(it);
    } else {
      // Live (possibly relocated): store the updated reference.
      *it = GcRoot<mirror::String>(new_object->AsString());
      ++it;
    }
  }
}
448
Mathieu Chartierc2e20622014-11-03 11:41:47 -0800449size_t InternTable::Table::Size() const {
Mathieu Chartierea0831f2015-12-29 13:17:37 -0800450 return std::accumulate(tables_.begin(),
451 tables_.end(),
Mathieu Chartier205b7622016-01-06 15:47:09 -0800452 0U,
Mathieu Chartier8cc418e2018-10-31 10:54:30 -0700453 [](size_t sum, const InternalTable& table) {
454 return sum + table.Size();
Mathieu Chartierea0831f2015-12-29 13:17:37 -0800455 });
Mathieu Chartierc2e20622014-11-03 11:41:47 -0800456}
457
// Sets the weak-root state, taking the intern table lock.
void InternTable::ChangeWeakRootState(gc::WeakRootState new_state) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  ChangeWeakRootStateLocked(new_state);
}

// Sets the weak-root state with the lock held. Only valid when read barriers
// are off (enforced by the CHECK); leaving the no-reads-or-writes state wakes
// threads blocked in WaitUntilAccessible().
void InternTable::ChangeWeakRootStateLocked(gc::WeakRootState new_state) {
  CHECK(!kUseReadBarrier);
  weak_root_state_ = new_state;
  if (new_state != gc::kWeakRootStateNoReadsOrWrites) {
    weak_intern_condition_.Broadcast(Thread::Current());
  }
}
470
Mathieu Chartier32cc9ee2015-10-15 09:19:15 -0700471InternTable::Table::Table() {
472 Runtime* const runtime = Runtime::Current();
Mathieu Chartier8cc418e2018-10-31 10:54:30 -0700473 InternalTable initial_table;
474 initial_table.set_.SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
475 runtime->GetHashTableMaxLoadFactor());
476 tables_.push_back(std::move(initial_table));
Mathieu Chartier32cc9ee2015-10-15 09:19:15 -0700477}
478
Brian Carlstrom7e93b502011-08-04 14:16:22 -0700479} // namespace art