blob: 015bf98e388076d29b8a7509284adedf2897931d [file] [log] [blame]
Elliott Hughes2faa5f12012-01-30 14:42:07 -08001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
Brian Carlstrom7e93b502011-08-04 14:16:22 -070016
#include "intern_table.h"

#include <memory>
#include <numeric>

#include "gc/collector/garbage_collector.h"
#include "gc/space/image_space.h"
#include "gc/weak_root_state.h"
#include "gc_root-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "thread.h"
#include "utf.h"
Brian Carlstrom7e93b502011-08-04 14:16:22 -070031
32namespace art {
33
// Starts with image-table tracking and root logging disabled; the weak root
// state defaults to "normal" (weak interns readable and writable).
InternTable::InternTable()
    : images_added_to_intern_table_(false),
      log_new_roots_(false),
      weak_intern_condition_("New intern condition", *Locks::intern_table_lock_),
      weak_root_state_(gc::kWeakRootStateNormal) {
}
Brian Carlstroma663ea52011-08-19 23:33:41 -070041size_t InternTable::Size() const {
Sebastien Hertzd2fe10a2014-01-15 10:20:56 +010042 MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
Mathieu Chartiereb175f72014-10-31 11:49:27 -070043 return strong_interns_.Size() + weak_interns_.Size();
Brian Carlstroma663ea52011-08-19 23:33:41 -070044}
45
Hiroshi Yamauchia91a4bc2014-06-13 16:44:55 -070046size_t InternTable::StrongSize() const {
47 MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
Mathieu Chartiereb175f72014-10-31 11:49:27 -070048 return strong_interns_.Size();
Hiroshi Yamauchia91a4bc2014-06-13 16:44:55 -070049}
50
51size_t InternTable::WeakSize() const {
52 MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
Mathieu Chartiereb175f72014-10-31 11:49:27 -070053 return weak_interns_.Size();
Hiroshi Yamauchia91a4bc2014-06-13 16:44:55 -070054}
55
Elliott Hughescac6cc72011-11-03 20:31:21 -070056void InternTable::DumpForSigQuit(std::ostream& os) const {
Mathieu Chartiereb175f72014-10-31 11:49:27 -070057 os << "Intern table: " << StrongSize() << " strong; " << WeakSize() << " weak\n";
Elliott Hughescac6cc72011-11-03 20:31:21 -070058}
59
// Visits the strong intern roots for the GC. Depending on `flags`, either all
// strong interns are visited, or only the roots logged since logging was
// enabled (used for incremental root re-scanning). The flags can also clear
// the root log and toggle logging on or off.
void InternTable::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  if ((flags & kVisitRootFlagAllRoots) != 0) {
    strong_interns_.VisitRoots(visitor);
  } else if ((flags & kVisitRootFlagNewRoots) != 0) {
    for (auto& root : new_strong_intern_roots_) {
      // Read before and after the visit (without read barriers) to detect
      // whether the visitor relocated the object.
      mirror::String* old_ref = root.Read<kWithoutReadBarrier>();
      root.VisitRoot(visitor, RootInfo(kRootInternedString));
      mirror::String* new_ref = root.Read<kWithoutReadBarrier>();
      if (new_ref != old_ref) {
        // The GC moved a root in the log. Need to search the strong interns and update the
        // corresponding object. This is slow, but luckily for us, this may only happen with a
        // concurrent moving GC.
        strong_interns_.Remove(old_ref);
        strong_interns_.Insert(new_ref);
      }
    }
  }
  if ((flags & kVisitRootFlagClearRootLog) != 0) {
    new_strong_intern_roots_.clear();
  }
  if ((flags & kVisitRootFlagStartLoggingNewRoots) != 0) {
    log_new_roots_ = true;
  } else if ((flags & kVisitRootFlagStopLoggingNewRoots) != 0) {
    log_new_roots_ = false;
  }
  // Note: we deliberately don't visit the weak_interns_ table and the immutable image roots.
}
88
// Finds a strongly interned string equal to `s`, or returns null.
// Caller must hold the intern table lock (asserted in Table::Find).
mirror::String* InternTable::LookupStrong(mirror::String* s) {
  return strong_interns_.Find(s);
}
92
// Finds a weakly interned string equal to `s`, or returns null.
// Caller must hold the intern table lock (asserted in Table::Find).
mirror::String* InternTable::LookupWeak(mirror::String* s) {
  return weak_interns_.Find(s);
}
96
Mathieu Chartierea0831f2015-12-29 13:17:37 -080097void InternTable::AddNewTable() {
Mathieu Chartiereb175f72014-10-31 11:49:27 -070098 MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
Mathieu Chartierea0831f2015-12-29 13:17:37 -080099 weak_interns_.AddNewTable();
100 strong_interns_.AddNewTable();
Elliott Hughescf4c6c42011-09-01 15:16:42 -0700101}
102
// Inserts `s` into the strong intern table and returns it.
// Caller must hold the intern table lock.
mirror::String* InternTable::InsertStrong(mirror::String* s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    // Record the insertion so it can be undone if the transaction aborts.
    runtime->RecordStrongStringInsertion(s);
  }
  if (log_new_roots_) {
    // Root logging is active (kVisitRootFlagStartLoggingNewRoots); remember
    // this root so VisitRoots can revisit just the new entries.
    new_strong_intern_roots_.push_back(GcRoot<mirror::String>(s));
  }
  strong_interns_.Insert(s);
  return s;
}
114
// Inserts `s` into the weak intern table and returns it.
// Caller must hold the intern table lock.
mirror::String* InternTable::InsertWeak(mirror::String* s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    // Record the insertion so it can be undone if the transaction aborts.
    runtime->RecordWeakStringInsertion(s);
  }
  weak_interns_.Insert(s);
  return s;
}
123
// Removes `s` from the strong intern table. Note: unlike RemoveWeak, this does
// not record the removal for transactions (strong removals only happen here
// via promotion paths). Caller must hold the intern table lock.
void InternTable::RemoveStrong(mirror::String* s) {
  strong_interns_.Remove(s);
}
127
// Removes `s` from the weak intern table, logging the removal if a transaction
// is active so it can be rolled back. Caller must hold the intern table lock.
void InternTable::RemoveWeak(mirror::String* s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordWeakStringRemoval(s);
  }
  weak_interns_.Remove(s);
}
135
// Insert/remove methods used to undo changes made during an aborted transaction.
// These are invoked during rollback, after the transaction has been deactivated,
// hence the DCHECKs: the plain Insert*/Remove* paths must not re-record the
// operations into the (now inactive) transaction log.
mirror::String* InternTable::InsertStrongFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  return InsertStrong(s);
}
mirror::String* InternTable::InsertWeakFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  return InsertWeak(s);
}
void InternTable::RemoveStrongFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  RemoveStrong(s);
}
void InternTable::RemoveWeakFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  RemoveWeak(s);
}
153
// Merges the interned strings of the given image spaces into this table.
// Images that carry a serialized intern-table section are mapped in directly;
// otherwise we fall back to walking every dex cache's resolved strings.
void InternTable::AddImagesStringsToTable(const std::vector<gc::space::ImageSpace*>& image_spaces) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  for (gc::space::ImageSpace* image_space : image_spaces) {
    const ImageHeader* const header = &image_space->GetImageHeader();
    // Check if we have the interned strings section.
    const ImageSection& section = header->GetImageSection(ImageHeader::kSectionInternedStrings);
    if (section.Size() > 0) {
      // Map the serialized table directly from the image memory (no copy).
      AddTableFromMemoryLocked(image_space->Begin() + section.Offset());
    } else {
      // TODO: Delete this logic?
      // Legacy path: scan each dex cache's resolved strings and intern them
      // individually, skipping ones that are already present.
      mirror::Object* root = header->GetImageRoot(ImageHeader::kDexCaches);
      mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
      for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
        mirror::DexCache* dex_cache = dex_caches->Get(i);
        const size_t num_strings = dex_cache->NumStrings();
        for (size_t j = 0; j < num_strings; ++j) {
          mirror::String* image_string = dex_cache->GetResolvedString(j);
          if (image_string != nullptr) {
            mirror::String* found = LookupStrong(image_string);
            if (found == nullptr) {
              InsertStrong(image_string);
            } else {
              // Any duplicate must be the exact same object, or interning
              // identity would be broken.
              DCHECK_EQ(found, image_string);
            }
          }
        }
      }
    }
  }
  // After this point, Insert() no longer needs to consult the images directly.
  images_added_to_intern_table_ = true;
}
185
// Searches the boot image spaces for a resolved string equal to `s`, by binary
// searching each dex file for the UTF-8 data and checking the corresponding
// dex cache slot. Returns null if no image is present or no match was found.
// Only valid before the image tables have been merged into this table.
mirror::String* InternTable::LookupStringFromImage(mirror::String* s) {
  DCHECK(!images_added_to_intern_table_);
  const std::vector<gc::space::ImageSpace*>& image_spaces =
      Runtime::Current()->GetHeap()->GetBootImageSpaces();
  if (image_spaces.empty()) {
    return nullptr;  // No image present.
  }
  const std::string utf8 = s->ToModifiedUtf8();
  for (gc::space::ImageSpace* image_space : image_spaces) {
    mirror::Object* root = image_space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
    mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
    for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
      mirror::DexCache* dex_cache = dex_caches->Get(i);
      const DexFile* dex_file = dex_cache->GetDexFile();
      // Binary search the dex file for the string index.
      const DexFile::StringId* string_id = dex_file->FindStringId(utf8.c_str());
      if (string_id != nullptr) {
        uint32_t string_idx = dex_file->GetIndexForStringId(*string_id);
        // GetResolvedString() contains a RB.
        mirror::String* image_string = dex_cache->GetResolvedString(string_idx);
        if (image_string != nullptr) {
          return image_string;
        }
      }
    }
  }
  return nullptr;
}
214
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700215void InternTable::BroadcastForNewInterns() {
216 CHECK(kUseReadBarrier);
217 Thread* self = Thread::Current();
218 MutexLock mu(self, *Locks::intern_table_lock_);
Mathieu Chartier14c3bf92015-07-13 14:35:43 -0700219 weak_intern_condition_.Broadcast(self);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700220}
221
// Blocks until weak intern reads are re-enabled after a GC pause.
// Called with the intern table lock held; it is released for the duration of
// the wait (the thread also transitions to a suspended state so the GC can
// proceed) and re-acquired before returning.
void InternTable::WaitUntilAccessible(Thread* self) {
  Locks::intern_table_lock_->ExclusiveUnlock(self);
  {
    ScopedThreadSuspension sts(self, kWaitingWeakGcRootRead);
    MutexLock mu(self, *Locks::intern_table_lock_);
    while (weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) {
      weak_intern_condition_.Wait(self);
    }
  }
  // Re-acquire in the runnable state, matching the caller's expectations.
  Locks::intern_table_lock_->ExclusiveLock(self);
}
233
// Core interning routine. Returns the canonical string equal to `s`, inserting
// `s` (or an equal image string) into the strong or weak table as needed.
// `holding_locks` indicates the caller may hold extra locks (e.g. during image
// loading) and therefore must never need to block for weak root access.
mirror::String* InternTable::Insert(mirror::String* s, bool is_strong, bool holding_locks) {
  if (s == nullptr) {
    return nullptr;
  }
  Thread* const self = Thread::Current();
  MutexLock mu(self, *Locks::intern_table_lock_);
  if (kDebugLocking && !holding_locks) {
    Locks::mutator_lock_->AssertSharedHeld(self);
    // Exactly the mutator lock plus the intern table lock acquired above.
    CHECK_EQ(2u, self->NumberOfHeldMutexes()) << "may only safely hold the mutator lock";
  }
  while (true) {
    if (holding_locks) {
      // A caller holding extra locks must be running while weak roots are
      // accessible, otherwise the wait below would deadlock.
      if (!kUseReadBarrier) {
        CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
      } else {
        CHECK(self->GetWeakRefAccessEnabled());
      }
    }
    // Check the strong table for a match.
    mirror::String* strong = LookupStrong(s);
    if (strong != nullptr) {
      return strong;
    }
    // Proceed only if weak roots may currently be read; otherwise wait below.
    if ((!kUseReadBarrier && weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) ||
        (kUseReadBarrier && self->GetWeakRefAccessEnabled())) {
      break;
    }
    // weak_root_state_ is set to gc::kWeakRootStateNoReadsOrWrites in the GC pause but is only
    // cleared after SweepSystemWeaks has completed. This is why we need to wait until it is
    // cleared.
    CHECK(!holding_locks);
    // Wrap `s` in a handle so a moving GC can update it while we wait.
    StackHandleScope<1> hs(self);
    auto h = hs.NewHandleWrapper(&s);
    WaitUntilAccessible(self);
  }
  if (!kUseReadBarrier) {
    CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
  } else {
    CHECK(self->GetWeakRefAccessEnabled());
  }
  // There is no match in the strong table, check the weak table.
  mirror::String* weak = LookupWeak(s);
  if (weak != nullptr) {
    if (is_strong) {
      // A match was found in the weak table. Promote to the strong table.
      RemoveWeak(weak);
      return InsertStrong(weak);
    }
    return weak;
  }
  // Check the image for a match.
  if (!images_added_to_intern_table_) {
    mirror::String* const image_string = LookupStringFromImage(s);
    if (image_string != nullptr) {
      return is_strong ? InsertStrong(image_string) : InsertWeak(image_string);
    }
  }
  // No match in the strong table or the weak table. Insert into the strong / weak table.
  return is_strong ? InsertStrong(s) : InsertWeak(s);
}
294
Mathieu Chartiered0fc1d2014-03-21 14:09:35 -0700295mirror::String* InternTable::InternStrong(int32_t utf16_length, const char* utf8_data) {
296 DCHECK(utf8_data != nullptr);
Ian Rogers7dfb28c2013-08-22 08:18:36 -0700297 return InternStrong(mirror::String::AllocFromModifiedUtf8(
298 Thread::Current(), utf16_length, utf8_data));
Elliott Hughescf4c6c42011-09-01 15:16:42 -0700299}
300
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800301mirror::String* InternTable::InternStrong(const char* utf8_data) {
Mathieu Chartiered0fc1d2014-03-21 14:09:35 -0700302 DCHECK(utf8_data != nullptr);
303 return InternStrong(mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf8_data));
Brian Carlstromc74255f2011-09-11 22:47:39 -0700304}
305
// Strongly interns a string that lives in an image space.
mirror::String* InternTable::InternStrongImageString(mirror::String* s) {
  // May be holding the heap bitmap lock.
  return Insert(s, true, true);
}
310
// Strongly interns `s`; returns the canonical equal string.
mirror::String* InternTable::InternStrong(mirror::String* s) {
  return Insert(s, true, false);
}
314
// Weakly interns `s`; returns the canonical equal string.
mirror::String* InternTable::InternWeak(mirror::String* s) {
  return Insert(s, false, false);
}
318
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800319bool InternTable::ContainsWeak(mirror::String* s) {
Nicolas Geoffray1bc977c2016-01-23 14:15:49 +0000320 MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
321 return LookupWeak(s) == s;
Elliott Hughescf4c6c42011-09-01 15:16:42 -0700322}
323
// GC entry point: drops weak interns whose objects are no longer marked and
// updates the roots of those that moved.
void InternTable::SweepInternTableWeaks(IsMarkedVisitor* visitor) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  weak_interns_.SweepWeaks(visitor);
}
328
// Deserializes a strong intern table from `ptr` (e.g. an image section);
// returns the number of bytes read.
size_t InternTable::AddTableFromMemory(const uint8_t* ptr) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return AddTableFromMemoryLocked(ptr);
}
333
// As AddTableFromMemory, but the caller already holds the intern table lock.
size_t InternTable::AddTableFromMemoryLocked(const uint8_t* ptr) {
  return strong_interns_.AddTableFromMemory(ptr);
}
337
// Serializes the strong intern table to `ptr` (used when writing images);
// returns the number of bytes written.
size_t InternTable::WriteToMemory(uint8_t* ptr) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return strong_interns_.WriteToMemory(ptr);
}
342
// Hash functor for the intern sets: delegates to the managed String's own
// (cached) hash code. Reading the object requires the mutator lock.
std::size_t InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& root) const {
  if (kIsDebugBuild) {
    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  }
  return static_cast<size_t>(root.Read()->GetHashCode());
}
349
// Equality functor for the intern sets: character-wise String equality.
// Reading the objects requires the mutator lock.
bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
                                               const GcRoot<mirror::String>& b) const {
  if (kIsDebugBuild) {
    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  }
  return a.Read()->Equals(b.Read());
}
357
// Deserializes one UnorderedSet from `ptr` without copying the backing memory
// and prepends it to tables_; returns the number of bytes consumed.
size_t InternTable::Table::AddTableFromMemory(const uint8_t* ptr) {
  size_t read_count = 0;
  UnorderedSet set(ptr, /*make copy*/false, &read_count);
  // TODO: Disable this for app images if app images have intern tables.
  static constexpr bool kCheckDuplicates = true;
  if (kCheckDuplicates) {
    // Sanity check: no string from the new table may already be interned.
    for (GcRoot<mirror::String>& string : set) {
      CHECK(Find(string.Read()) == nullptr) << "Already found " << string.Read()->ToModifiedUtf8();
    }
  }
  // Insert at the front since we insert into the back.
  tables_.insert(tables_.begin(), std::move(set));
  return read_count;
}
372
// Serializes the table to `ptr`; if several sub-tables exist they are first
// merged into one combined set so a single set is written out. Returns the
// number of bytes written (0 when there is nothing to write).
size_t InternTable::Table::WriteToMemory(uint8_t* ptr) {
  if (tables_.empty()) {
    return 0;
  }
  UnorderedSet* table_to_write;
  UnorderedSet combined;
  if (tables_.size() > 1) {
    table_to_write = &combined;
    for (UnorderedSet& table : tables_) {
      for (GcRoot<mirror::String>& string : table) {
        combined.Insert(string);
      }
    }
  } else {
    // Single table: write it directly, avoiding the merge copy.
    table_to_write = &tables_.back();
  }
  return table_to_write->WriteToMemory(ptr);
}
391
// Removes `s` from whichever sub-table contains it; aborts if `s` was never
// interned (callers must only remove known-interned strings).
void InternTable::Table::Remove(mirror::String* s) {
  for (UnorderedSet& table : tables_) {
    auto it = table.Find(GcRoot<mirror::String>(s));
    if (it != table.end()) {
      table.Erase(it);
      return;
    }
  }
  LOG(FATAL) << "Attempting to remove non-interned string " << s->ToModifiedUtf8();
}
402
// Looks up a string equal to `s` across all sub-tables (oldest/image tables
// first); returns null when absent. Requires the intern table lock.
mirror::String* InternTable::Table::Find(mirror::String* s) {
  Locks::intern_table_lock_->AssertHeld(Thread::Current());
  for (UnorderedSet& table : tables_) {
    auto it = table.Find(GcRoot<mirror::String>(s));
    if (it != table.end()) {
      return it->Read();
    }
  }
  return nullptr;
}
413
Mathieu Chartierea0831f2015-12-29 13:17:37 -0800414void InternTable::Table::AddNewTable() {
415 tables_.push_back(UnorderedSet());
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700416}
417
// Inserts `s` into the newest sub-table.
void InternTable::Table::Insert(mirror::String* s) {
  // Always insert the last table, the image tables are before and we avoid inserting into these
  // to prevent dirty pages.
  DCHECK(!tables_.empty());
  tables_.back().Insert(GcRoot<mirror::String>(s));
}
424
// Reports every interned string in every sub-table as a GC root, batching the
// reports through a buffered visitor to reduce per-root overhead.
void InternTable::Table::VisitRoots(RootVisitor* visitor) {
  BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
      visitor, RootInfo(kRootInternedString));
  for (UnorderedSet& table : tables_) {
    for (auto& intern : table) {
      buffered_visitor.VisitRoot(intern);
    }
  }
}
434
// Sweeps every sub-table (see the two-argument overload for the per-set work).
void InternTable::Table::SweepWeaks(IsMarkedVisitor* visitor) {
  for (UnorderedSet& table : tables_) {
    SweepWeaks(&table, visitor);
  }
}
440
// Sweeps one set: erases entries whose object is unmarked (collected) and
// rewrites the root for objects the GC relocated.
void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor) {
  for (auto it = set->begin(), end = set->end(); it != end;) {
    // This does not need a read barrier because this is called by GC.
    mirror::Object* object = it->Read<kWithoutReadBarrier>();
    mirror::Object* new_object = visitor->IsMarked(object);
    if (new_object == nullptr) {
      // Dead: drop the weak intern entry.
      it = set->Erase(it);
    } else {
      // Alive (possibly moved): update the root to the new address.
      *it = GcRoot<mirror::String>(new_object->AsString());
      ++it;
    }
  }
}
454
Mathieu Chartierc2e20622014-11-03 11:41:47 -0800455size_t InternTable::Table::Size() const {
Mathieu Chartierea0831f2015-12-29 13:17:37 -0800456 return std::accumulate(tables_.begin(),
457 tables_.end(),
Mathieu Chartier205b7622016-01-06 15:47:09 -0800458 0U,
Mathieu Chartierea0831f2015-12-29 13:17:37 -0800459 [](size_t sum, const UnorderedSet& set) {
460 return sum + set.Size();
461 });
Mathieu Chartierc2e20622014-11-03 11:41:47 -0800462}
463
// Sets the weak root access state, taking the intern table lock.
void InternTable::ChangeWeakRootState(gc::WeakRootState new_state) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  ChangeWeakRootStateLocked(new_state);
}
468
// Locked part of ChangeWeakRootState. Waking waiters when leaving the
// no-reads-or-writes state unblocks threads parked in WaitUntilAccessible.
// Not used with read barriers, which track weak-ref access per thread instead.
void InternTable::ChangeWeakRootStateLocked(gc::WeakRootState new_state) {
  CHECK(!kUseReadBarrier);
  weak_root_state_ = new_state;
  if (new_state != gc::kWeakRootStateNoReadsOrWrites) {
    weak_intern_condition_.Broadcast(Thread::Current());
  }
}
476
// Starts with a single empty sub-table whose load factors come from the
// runtime's hash table tuning options.
InternTable::Table::Table() {
  Runtime* const runtime = Runtime::Current();
  // Initial table.
  tables_.push_back(UnorderedSet());
  tables_.back().SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
                               runtime->GetHashTableMaxLoadFactor());
}
484
Brian Carlstrom7e93b502011-08-04 14:16:22 -0700485} // namespace art