/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jit_code_cache.h"

#include <sstream>

#include "art_method-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
#include "linear_alloc.h"
#include "mem_map.h"
#include "oat_file-inl.h"
#include "thread_list.h"

namespace art {
namespace jit {

static constexpr int kProtAll = PROT_READ | PROT_WRITE | PROT_EXEC;
static constexpr int kProtData = PROT_READ | PROT_WRITE;
static constexpr int kProtCode = PROT_READ | PROT_EXEC;

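// The cache is mapped with all pages readable, writable, and executable, then immediately locked
// down: code pages stay read+execute (kProtCode) and data pages read+write (kProtData). The code
// pages are only flipped back to kProtAll for the short window in which new code is committed or
// collected (see ScopedCodeCacheWrite below). CHECKED_MPROTECT wraps mprotect() and aborts the
// runtime if a protection change ever fails.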
#define CHECKED_MPROTECT(memory, size, prot)                    \
  do {                                                          \
    int rc = mprotect(memory, size, prot);                      \
    if (UNLIKELY(rc != 0)) {                                    \
      /* mprotect() already set errno; PLOG reports it. */      \
      PLOG(FATAL) << "Failed to mprotect jit code cache";       \
    }                                                           \
  } while (false)

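// Creates the cache by reserving a single anonymous R/W/X mapping of the requested capacity and
// then splitting it in two: the first quarter becomes the data cache (GC maps, mapping tables,
// vmap tables) and the remainder becomes the code cache. Carving both regions out of one mapping
// guarantees they stay close enough for 32-bit offsets between them.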
JitCodeCache* JitCodeCache::Create(size_t capacity, std::string* error_msg) {
  CHECK_GT(capacity, 0U);
  CHECK_LT(capacity, kMaxCapacity);
  std::string error_str;
  // Map name specific for android_os_Debug.cpp accounting.
  MemMap* data_map = MemMap::MapAnonymous(
      "data-code-cache", nullptr, capacity, kProtAll, false, false, &error_str);
  if (data_map == nullptr) {
    std::ostringstream oss;
    oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
    *error_msg = oss.str();
    return nullptr;
  }

  // Data cache is 1 / 4 of the map.
  // TODO: Make this variable?
  size_t data_size = RoundUp(data_map->Size() / 4, kPageSize);
  size_t code_size = data_map->Size() - data_size;
  uint8_t* divider = data_map->Begin() + data_size;

  // We need 32-bit offsets from method headers in the code cache to entries in the data cache.
  // If the two maps ended up more than 4GiB apart, using separate mappings would not work.
  MemMap* code_map = data_map->RemapAtEnd(divider, "jit-code-cache", kProtAll, &error_str);
  if (code_map == nullptr) {
    std::ostringstream oss;
    oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
    *error_msg = oss.str();
    return nullptr;
  }
  DCHECK_EQ(code_map->Size(), code_size);
  DCHECK_EQ(code_map->Begin(), divider);
  return new JitCodeCache(code_map, data_map);
}

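// The constructor carves a dlmalloc mspace out of each map, caps each mspace's footprint at the
// map size so it never asks the system for more memory, drops the page protections from R/W/X to
// their steady-state values, and allocates the bitmap used to mark live code during collections.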
JitCodeCache::JitCodeCache(MemMap* code_map, MemMap* data_map)
    : lock_("Jit code cache", kJitCodeCacheLock),
      lock_cond_("Jit code cache variable", lock_),
      collection_in_progress_(false),
      code_map_(code_map),
      data_map_(data_map) {

  code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_map_->Size(), false /*locked*/);
  data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_map_->Size(), false /*locked*/);

  if (code_mspace_ == nullptr || data_mspace_ == nullptr) {
    PLOG(FATAL) << "create_mspace_with_base failed";
  }

  // Prevent morecore requests from the mspace.
  mspace_set_footprint_limit(code_mspace_, code_map_->Size());
  mspace_set_footprint_limit(data_mspace_, data_map_->Size());

  CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
  CHECKED_MPROTECT(data_map_->Begin(), data_map_->Size(), kProtData);

  live_bitmap_.reset(CodeCacheBitmap::Create("code-cache-bitmap",
                                             reinterpret_cast<uintptr_t>(code_map_->Begin()),
                                             reinterpret_cast<uintptr_t>(code_map_->End())));

  if (live_bitmap_.get() == nullptr) {
    PLOG(FATAL) << "creating bitmaps for the JIT code cache failed";
  }

  VLOG(jit) << "Created jit code cache: data size="
            << PrettySize(data_map_->Size())
            << ", code size="
            << PrettySize(code_map_->Size());
}

bool JitCodeCache::ContainsPc(const void* ptr) const {
  return code_map_->Begin() <= ptr && ptr < code_map_->End();
}

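// RAII guard used while writing new code: the constructor makes the whole code map writable
// (kProtAll) and the destructor restores the read+execute-only protection (kProtCode), so the
// code pages are never left writable outside a commit or a collection.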
class ScopedCodeCacheWrite {
 public:
  explicit ScopedCodeCacheWrite(MemMap* code_map) : code_map_(code_map) {
    CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtAll);
  }
  ~ScopedCodeCacheWrite() {
    CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
  }
 private:
  MemMap* const code_map_;

  DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
};

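// Public entry point for publishing newly compiled code. If the first attempt fails because the
// cache is full, a collection is run and the commit is retried exactly once; a second failure is
// reported to the caller as nullptr.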
uint8_t* JitCodeCache::CommitCode(Thread* self,
                                  ArtMethod* method,
                                  const uint8_t* mapping_table,
                                  const uint8_t* vmap_table,
                                  const uint8_t* gc_map,
                                  size_t frame_size_in_bytes,
                                  size_t core_spill_mask,
                                  size_t fp_spill_mask,
                                  const uint8_t* code,
                                  size_t code_size) {
  uint8_t* result = CommitCodeInternal(self,
                                       method,
                                       mapping_table,
                                       vmap_table,
                                       gc_map,
                                       frame_size_in_bytes,
                                       core_spill_mask,
                                       fp_spill_mask,
                                       code,
                                       code_size);
  if (result == nullptr) {
    // Retry.
    GarbageCollectCache(self);
    result = CommitCodeInternal(self,
                                method,
                                mapping_table,
                                vmap_table,
                                gc_map,
                                frame_size_in_bytes,
                                core_spill_mask,
                                fp_spill_mask,
                                code,
                                code_size);
  }
  return result;
}

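// Must be called with lock_ held. Blocks on lock_cond_ until any in-progress collection has
// finished, and returns whether it actually had to wait.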
bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
  bool in_collection = false;
  while (collection_in_progress_) {
    in_collection = true;
    lock_cond_.Wait(self);
  }
  return in_collection;
}

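// Each compiled method is laid out as [OatQuickMethodHeader][code], with the header padded up to
// the instruction-set alignment, so the start of the underlying allocation is recovered by
// stepping back one aligned header from the code pointer.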
static uintptr_t FromCodeToAllocation(const void* code) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
}

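// Releases everything owned by one compiled method: the data-cache allocations the method header
// points at (native GC map, mapping table, vmap table) and then the code allocation itself.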
void JitCodeCache::FreeCode(const void* code_ptr, ArtMethod* method ATTRIBUTE_UNUSED) {
  uintptr_t allocation = FromCodeToAllocation(code_ptr);
  const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
  const uint8_t* data = method_header->GetNativeGcMap();
  if (data != nullptr) {
    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
  }
  data = method_header->GetMappingTable();
  if (data != nullptr) {
    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
  }
  // Use the offset directly to avoid the sanity check that the method was compiled
  // with the optimizing compiler.
  // TODO(ngeoffray): Clean up.
  if (method_header->vmap_table_offset_ != 0) {
    data = method_header->code_ - method_header->vmap_table_offset_;
    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
  }
  mspace_free(code_mspace_, reinterpret_cast<uint8_t*>(allocation));
}

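// Drops every compiled method whose ArtMethod lives in the given LinearAlloc; presumably this is
// invoked when the owning class loader (and therefore its allocator) is about to be reclaimed.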
void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
  MutexLock mu(self, lock_);
  // We do not check whether a code cache GC is in progress: this method is called with the
  // classlinker_classes_lock_ held, and suspending ourselves here could lead to a deadlock.
  for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
    if (alloc.ContainsUnsafe(it->second)) {
      FreeCode(it->first, it->second);
      it = method_code_map_.erase(it);
    } else {
      ++it;
    }
  }
}

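// The actual commit sequence: suspend, take the cache lock and wait out any collection, open a
// write window on the code pages, carve an aligned header+code allocation out of the code mspace,
// copy the code in, construct the OatQuickMethodHeader with offsets back to the data-cache
// tables, flush the instruction cache, record the mapping from code pointer to ArtMethod, and
// finally point the method's quick entry point at the new code.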
uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
                                          ArtMethod* method,
                                          const uint8_t* mapping_table,
                                          const uint8_t* vmap_table,
                                          const uint8_t* gc_map,
                                          size_t frame_size_in_bytes,
                                          size_t core_spill_mask,
                                          size_t fp_spill_mask,
                                          const uint8_t* code,
                                          size_t code_size) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  // Ensure the header ends up at expected instruction alignment.
  size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
  size_t total_size = header_size + code_size;

  OatQuickMethodHeader* method_header = nullptr;
  uint8_t* code_ptr = nullptr;

  ScopedThreadSuspension sts(self, kSuspended);
  MutexLock mu(self, lock_);
  WaitForPotentialCollectionToComplete(self);
  {
    ScopedCodeCacheWrite scc(code_map_.get());
    uint8_t* result = reinterpret_cast<uint8_t*>(
        mspace_memalign(code_mspace_, alignment, total_size));
    if (result == nullptr) {
      return nullptr;
    }
    code_ptr = result + header_size;
    DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(code_ptr), alignment);

    std::copy(code, code + code_size, code_ptr);
    method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
    new (method_header) OatQuickMethodHeader(
        (mapping_table == nullptr) ? 0 : code_ptr - mapping_table,
        (vmap_table == nullptr) ? 0 : code_ptr - vmap_table,
        (gc_map == nullptr) ? 0 : code_ptr - gc_map,
        frame_size_in_bytes,
        core_spill_mask,
        fp_spill_mask,
        code_size);
  }

  __builtin___clear_cache(reinterpret_cast<char*>(code_ptr),
                          reinterpret_cast<char*>(code_ptr + code_size));
  method_code_map_.Put(code_ptr, method);
  // We checked earlier that no collection was in progress. If one were, setting the entry point
  // of a method would be unsafe, as the collection could delete it.
  DCHECK(!collection_in_progress_);
  method->SetEntryPointFromQuickCompiledCode(method_header->GetEntryPoint());
  return reinterpret_cast<uint8_t*>(method_header);
}

size_t JitCodeCache::CodeCacheSize() {
  MutexLock mu(Thread::Current(), lock_);
  size_t bytes_allocated = 0;
  mspace_inspect_all(code_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
  return bytes_allocated;
}

size_t JitCodeCache::DataCacheSize() {
  MutexLock mu(Thread::Current(), lock_);
  size_t bytes_allocated = 0;
  mspace_inspect_all(data_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
  return bytes_allocated;
}

size_t JitCodeCache::NumberOfCompiledCode() {
  MutexLock mu(Thread::Current(), lock_);
  return method_code_map_.size();
}

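// Reserves a chunk of the data cache (rounded up to pointer size), mirroring CommitCode's
// strategy: if the first allocation fails, run a collection and retry once.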
uint8_t* JitCodeCache::ReserveData(Thread* self, size_t size) {
  size = RoundUp(size, sizeof(void*));
  uint8_t* result = nullptr;

  {
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    result = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
  }

  if (result == nullptr) {
    // Retry.
    GarbageCollectCache(self);
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    result = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
  }

  return result;
}

uint8_t* JitCodeCache::AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end) {
  uint8_t* result = ReserveData(self, end - begin);
  if (result == nullptr) {
    return nullptr;  // Out of space in the data cache.
  }
  std::copy(begin, end, result);
  return result;
}

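// Stack visitor run on every thread during a collection. For each frame whose code lives in the
// cache, it marks the corresponding allocation in the live bitmap so the sweep below keeps it.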
class MarkCodeVisitor FINAL : public StackVisitor {
 public:
  MarkCodeVisitor(Thread* thread_in, JitCodeCache* code_cache_in)
      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
        code_cache_(code_cache_in),
        bitmap_(code_cache_->GetLiveBitmap()) {}

  bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
    if (method_header == nullptr) {
      return true;
    }
    const void* code = method_header->GetCode();
    if (code_cache_->ContainsPc(code)) {
      // Use the atomic set version, as multiple threads are executing this code.
      bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
    }
    return true;
  }

 private:
  JitCodeCache* const code_cache_;
  CodeCacheBitmap* const bitmap_;
};

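// Checkpoint closure that runs MarkCodeVisitor over a thread's stack. Runnable threads execute it
// themselves and then pass the barrier; suspended threads have it run on their behalf.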
class MarkCodeClosure FINAL : public Closure {
 public:
  MarkCodeClosure(JitCodeCache* code_cache, Barrier* barrier)
      : code_cache_(code_cache), barrier_(barrier) {}

  void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK(thread == Thread::Current() || thread->IsSuspended());
    MarkCodeVisitor visitor(thread, code_cache_);
    visitor.WalkStack();
    if (thread->GetState() == kRunnable) {
      barrier_->Pass(Thread::Current());
    }
  }

 private:
  JitCodeCache* const code_cache_;
  Barrier* const barrier_;
};

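// Cache collection proceeds in three phases:
//   1. Under lock_, flag the collection and reset every compiled method's entry point to the
//      interpreter bridge, so no new invocations enter JIT code.
//   2. Run a checkpoint on all threads; each thread marks the cached code it is still executing.
//   3. Under lock_ again, free unmarked code, restore the entry points of marked code, clear the
//      live bitmap, and wake up anyone waiting in WaitForPotentialCollectionToComplete.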
void JitCodeCache::GarbageCollectCache(Thread* self) {
  if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
    LOG(INFO) << "Clearing code cache, code="
              << PrettySize(CodeCacheSize())
              << ", data=" << PrettySize(DataCacheSize());
  }

  size_t map_size = 0;
  ScopedThreadSuspension sts(self, kSuspended);

  // Walk over all compiled methods and reset their entry points to the interpreter.
  {
    MutexLock mu(self, lock_);
    if (WaitForPotentialCollectionToComplete(self)) {
      return;
    }
    collection_in_progress_ = true;
    map_size = method_code_map_.size();
    for (auto& it : method_code_map_) {
      it.second->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
    }
  }

  // Run a checkpoint on all threads to mark the JIT compiled code they are running.
  {
    Barrier barrier(0);
    MarkCodeClosure closure(this, &barrier);
    size_t threads_running_checkpoint =
        Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
    if (threads_running_checkpoint != 0) {
      barrier.Increment(self, threads_running_checkpoint);
    }
  }

  // Free unused compiled code, and restore the entry point of used compiled code.
  {
    MutexLock mu(self, lock_);
    DCHECK_EQ(map_size, method_code_map_.size());
    ScopedCodeCacheWrite scc(code_map_.get());
    for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
      const void* code_ptr = it->first;
      ArtMethod* method = it->second;
      uintptr_t allocation = FromCodeToAllocation(code_ptr);
      const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
      if (GetLiveBitmap()->Test(allocation)) {
        method->SetEntryPointFromQuickCompiledCode(method_header->GetEntryPoint());
        ++it;
      } else {
        method->ClearCounter();
        DCHECK_NE(method->GetEntryPointFromQuickCompiledCode(), method_header->GetEntryPoint());
        FreeCode(code_ptr, method);
        it = method_code_map_.erase(it);
      }
    }
    GetLiveBitmap()->Bitmap::Clear();
    collection_in_progress_ = false;
    lock_cond_.Broadcast(self);
  }

  if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
    LOG(INFO) << "After clearing code cache, code="
              << PrettySize(CodeCacheSize())
              << ", data=" << PrettySize(DataCacheSize());
  }
}

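// Maps a pc inside the cache back to the OatQuickMethodHeader of the compiled method containing
// it. method_code_map_ is ordered by code pointer, so lower_bound followed by one step back
// yields the candidate entry; the header then confirms the pc really falls inside that method's
// code.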
OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
  static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
  if (kRuntimeISA == kArm) {
    // On Thumb-2, the pc is offset by one.
    --pc;
  }
  if (!ContainsPc(reinterpret_cast<const void*>(pc))) {
    return nullptr;
  }

  MutexLock mu(Thread::Current(), lock_);
  if (method_code_map_.empty()) {
    return nullptr;
  }
  auto it = method_code_map_.lower_bound(reinterpret_cast<const void*>(pc));
  --it;

  const void* code_ptr = it->first;
  OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
  if (!method_header->Contains(pc)) {
    return nullptr;
  }
  DCHECK_EQ(it->second, method)
      << PrettyMethod(method) << " " << PrettyMethod(it->second) << " " << std::hex << pc;
  return method_header;
}

}  // namespace jit
}  // namespace art