/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jit_code_cache.h"

#include <sstream>

#include "art_method-inl.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "debugger_interface.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
#include "jit/jit.h"
#include "jit/profiling_info.h"
#include "linear_alloc.h"
#include "mem_map.h"
#include "oat_file-inl.h"
#include "scoped_thread_state_change.h"
#include "thread_list.h"

namespace art {
namespace jit {

static constexpr int kProtAll = PROT_READ | PROT_WRITE | PROT_EXEC;
static constexpr int kProtData = PROT_READ | PROT_WRITE;
static constexpr int kProtCode = PROT_READ | PROT_EXEC;

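// mprotect() the given memory range and abort if the call fails.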
#define CHECKED_MPROTECT(memory, size, prot)                \
  do {                                                      \
    int rc = mprotect(memory, size, prot);                  \
    if (UNLIKELY(rc != 0)) {                                \
      errno = rc;                                           \
      PLOG(FATAL) << "Failed to mprotect jit code cache";   \
    }                                                       \
  } while (false)                                           \

JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
                                   size_t max_capacity,
                                   bool generate_debug_info,
                                   std::string* error_msg) {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  CHECK_GE(max_capacity, initial_capacity);

  // Generating debug information is mostly for using the 'perf' tool, which does
  // not work with ashmem.
  bool use_ashmem = !generate_debug_info;
  // With 'perf', we want a 1-1 mapping between an address and a method.
  bool garbage_collect_code = !generate_debug_info;

  // We need 32-bit offsets from method headers in the code cache to point to things in
  // the data cache. If the maps are more than 4GB apart, having multiple maps would not
  // work, so ensure we are below 1 GB to be safe.
  if (max_capacity > 1 * GB) {
    std::ostringstream oss;
    oss << "Maximum code cache capacity is limited to 1 GB, "
        << PrettySize(max_capacity) << " is too big";
    *error_msg = oss.str();
    return nullptr;
  }

  std::string error_str;
  // Map name specific for android_os_Debug.cpp accounting.
  MemMap* data_map = MemMap::MapAnonymous(
      "data-code-cache", nullptr, max_capacity, kProtAll, false, false, &error_str, use_ashmem);
  if (data_map == nullptr) {
    std::ostringstream oss;
    oss << "Failed to create read write execute cache: " << error_str << " size=" << max_capacity;
    *error_msg = oss.str();
    return nullptr;
  }

  // Align both capacities to page size, as that's the unit mspaces use.
  initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
  max_capacity = RoundDown(max_capacity, 2 * kPageSize);

  // Data cache is 1 / 2 of the map.
  // TODO: Make this variable?
  size_t data_size = max_capacity / 2;
  size_t code_size = max_capacity - data_size;
  DCHECK_EQ(code_size + data_size, max_capacity);
  uint8_t* divider = data_map->Begin() + data_size;

  MemMap* code_map =
      data_map->RemapAtEnd(divider, "jit-code-cache", kProtAll, &error_str, use_ashmem);
  if (code_map == nullptr) {
    std::ostringstream oss;
    oss << "Failed to create read write execute cache: " << error_str << " size=" << max_capacity;
    *error_msg = oss.str();
    return nullptr;
  }
  DCHECK_EQ(code_map->Begin(), divider);
  data_size = initial_capacity / 2;
  code_size = initial_capacity - data_size;
  DCHECK_EQ(code_size + data_size, initial_capacity);
  return new JitCodeCache(
      code_map, data_map, code_size, data_size, max_capacity, garbage_collect_code);
}

JitCodeCache::JitCodeCache(MemMap* code_map,
                           MemMap* data_map,
                           size_t initial_code_capacity,
                           size_t initial_data_capacity,
                           size_t max_capacity,
                           bool garbage_collect_code)
    : lock_("Jit code cache", kJitCodeCacheLock),
      lock_cond_("Jit code cache variable", lock_),
      collection_in_progress_(false),
      code_map_(code_map),
      data_map_(data_map),
      max_capacity_(max_capacity),
      current_capacity_(initial_code_capacity + initial_data_capacity),
      code_end_(initial_code_capacity),
      data_end_(initial_data_capacity),
      last_collection_increased_code_cache_(false),
      last_update_time_ns_(0),
      garbage_collect_code_(garbage_collect_code),
      used_memory_for_data_(0),
      used_memory_for_code_(0),
      number_of_compilations_(0),
      number_of_osr_compilations_(0),
      number_of_deoptimizations_(0),
      number_of_collections_(0) {

  DCHECK_GE(max_capacity, initial_code_capacity + initial_data_capacity);
  code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_end_, false /*locked*/);
  data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_end_, false /*locked*/);

  if (code_mspace_ == nullptr || data_mspace_ == nullptr) {
    PLOG(FATAL) << "create_mspace_with_base failed";
  }

  SetFootprintLimit(current_capacity_);

  CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
  CHECKED_MPROTECT(data_map_->Begin(), data_map_->Size(), kProtData);

  VLOG(jit) << "Created jit code cache: initial data size="
            << PrettySize(initial_data_capacity)
            << ", initial code size="
            << PrettySize(initial_code_capacity);
}

bool JitCodeCache::ContainsPc(const void* ptr) const {
  return code_map_->Begin() <= ptr && ptr < code_map_->End();
}

bool JitCodeCache::ContainsMethod(ArtMethod* method) {
  MutexLock mu(Thread::Current(), lock_);
  for (auto& it : method_code_map_) {
    if (it.second == method) {
      return true;
    }
  }
  return false;
}

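// RAII helper that makes the whole code region writable (RWX) for its scope and
// restores the read/execute-only protection when it goes out of scope.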
class ScopedCodeCacheWrite : ScopedTrace {
 public:
  explicit ScopedCodeCacheWrite(MemMap* code_map)
      : ScopedTrace("ScopedCodeCacheWrite"),
        code_map_(code_map) {
    ScopedTrace trace("mprotect all");
    CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtAll);
  }
  ~ScopedCodeCacheWrite() {
    ScopedTrace trace("mprotect code");
    CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
  }
 private:
  MemMap* const code_map_;

  DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
};

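// Public entry point for committing compiled code. If the first attempt fails because
// the cache is full, collect the cache once and retry.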
uint8_t* JitCodeCache::CommitCode(Thread* self,
                                  ArtMethod* method,
                                  const uint8_t* mapping_table,
                                  const uint8_t* vmap_table,
                                  const uint8_t* gc_map,
                                  size_t frame_size_in_bytes,
                                  size_t core_spill_mask,
                                  size_t fp_spill_mask,
                                  const uint8_t* code,
                                  size_t code_size,
                                  bool osr) {
  uint8_t* result = CommitCodeInternal(self,
                                       method,
                                       mapping_table,
                                       vmap_table,
                                       gc_map,
                                       frame_size_in_bytes,
                                       core_spill_mask,
                                       fp_spill_mask,
                                       code,
                                       code_size,
                                       osr);
  if (result == nullptr) {
    // Retry.
    GarbageCollectCache(self);
    result = CommitCodeInternal(self,
                                method,
                                mapping_table,
                                vmap_table,
                                gc_map,
                                frame_size_in_bytes,
                                core_spill_mask,
                                fp_spill_mask,
                                code,
                                code_size,
                                osr);
  }
  return result;
}

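// Blocks until a potential in-progress collection has finished. Returns true if a
// collection was in progress, i.e. if we actually waited.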
bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
  bool in_collection = false;
  while (collection_in_progress_) {
    in_collection = true;
    lock_cond_.Wait(self);
  }
  return in_collection;
}

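// Each committed entry is laid out as [OatQuickMethodHeader | code], with the header
// size rounded up to the instruction set alignment. This maps a code pointer back to
// the start of its allocation.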
static uintptr_t FromCodeToAllocation(const void* code) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
}

void JitCodeCache::FreeCode(const void* code_ptr, ArtMethod* method ATTRIBUTE_UNUSED) {
  uintptr_t allocation = FromCodeToAllocation(code_ptr);
  const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
  // Notify the native debugger that we are about to remove the code.
  // It does nothing if we are not using a native debugger.
  DeleteJITCodeEntryForAddress(reinterpret_cast<uintptr_t>(code_ptr));

  FreeData(const_cast<uint8_t*>(method_header->GetNativeGcMap()));
  FreeData(const_cast<uint8_t*>(method_header->GetMappingTable()));
  // Use the offset directly to avoid the sanity check that the method is
  // compiled with optimizing.
  // TODO(ngeoffray): Clean up.
  if (method_header->vmap_table_offset_ != 0) {
    const uint8_t* data = method_header->code_ - method_header->vmap_table_offset_;
    FreeData(const_cast<uint8_t*>(data));
  }
  FreeCode(reinterpret_cast<uint8_t*>(allocation));
}

void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  MutexLock mu(self, lock_);
  // We do not check if a code cache GC is in progress, as this method comes
  // with the classlinker_classes_lock_ held, and suspending ourselves could
  // lead to a deadlock.
  {
    ScopedCodeCacheWrite scc(code_map_.get());
    for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
      if (alloc.ContainsUnsafe(it->second)) {
        FreeCode(it->first, it->second);
        it = method_code_map_.erase(it);
      } else {
        ++it;
      }
    }
  }
  for (auto it = osr_code_map_.begin(); it != osr_code_map_.end();) {
    if (alloc.ContainsUnsafe(it->first)) {
      // Note that the code has already been removed in the loop above.
      it = osr_code_map_.erase(it);
    } else {
      ++it;
    }
  }
  for (auto it = profiling_infos_.begin(); it != profiling_infos_.end();) {
    ProfilingInfo* info = *it;
    if (alloc.ContainsUnsafe(info->GetMethod())) {
      info->GetMethod()->SetProfilingInfo(nullptr);
      FreeData(reinterpret_cast<uint8_t*>(info));
      it = profiling_infos_.erase(it);
    } else {
      ++it;
    }
  }
}

uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
                                          ArtMethod* method,
                                          const uint8_t* mapping_table,
                                          const uint8_t* vmap_table,
                                          const uint8_t* gc_map,
                                          size_t frame_size_in_bytes,
                                          size_t core_spill_mask,
                                          size_t fp_spill_mask,
                                          const uint8_t* code,
                                          size_t code_size,
                                          bool osr) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  // Ensure the header ends up at expected instruction alignment.
  size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
  size_t total_size = header_size + code_size;

  OatQuickMethodHeader* method_header = nullptr;
  uint8_t* code_ptr = nullptr;
  uint8_t* memory = nullptr;
  {
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    {
      ScopedCodeCacheWrite scc(code_map_.get());
      memory = AllocateCode(total_size);
      if (memory == nullptr) {
        return nullptr;
      }
      code_ptr = memory + header_size;

      std::copy(code, code + code_size, code_ptr);
      method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
      new (method_header) OatQuickMethodHeader(
          (mapping_table == nullptr) ? 0 : code_ptr - mapping_table,
          (vmap_table == nullptr) ? 0 : code_ptr - vmap_table,
          (gc_map == nullptr) ? 0 : code_ptr - gc_map,
          frame_size_in_bytes,
          core_spill_mask,
          fp_spill_mask,
          code_size);
    }

    FlushInstructionCache(reinterpret_cast<char*>(code_ptr),
                          reinterpret_cast<char*>(code_ptr + code_size));
    number_of_compilations_++;
  }
  // We need to update the entry point in the runnable state for the instrumentation.
  {
    MutexLock mu(self, lock_);
    method_code_map_.Put(code_ptr, method);
    if (osr) {
      number_of_osr_compilations_++;
      osr_code_map_.Put(method, code_ptr);
    } else {
      Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
          method, method_header->GetEntryPoint());
    }
    if (collection_in_progress_) {
      // We need to update the live bitmap if there is a GC to ensure it sees this new
      // code.
      GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
    }
    last_update_time_ns_.StoreRelease(NanoTime());
    VLOG(jit)
        << "JIT added (osr = " << std::boolalpha << osr << std::noboolalpha << ") "
        << PrettyMethod(method) << "@" << method
        << " ccache_size=" << PrettySize(CodeCacheSizeLocked()) << ": "
        << " dcache_size=" << PrettySize(DataCacheSizeLocked()) << ": "
        << reinterpret_cast<const void*>(method_header->GetEntryPoint()) << ","
        << reinterpret_cast<const void*>(method_header->GetEntryPoint() + method_header->code_size_);
  }

  return reinterpret_cast<uint8_t*>(method_header);
}

size_t JitCodeCache::CodeCacheSize() {
  MutexLock mu(Thread::Current(), lock_);
  return CodeCacheSizeLocked();
}

size_t JitCodeCache::CodeCacheSizeLocked() {
  return used_memory_for_code_;
}

size_t JitCodeCache::DataCacheSize() {
  MutexLock mu(Thread::Current(), lock_);
  return DataCacheSizeLocked();
}

size_t JitCodeCache::DataCacheSizeLocked() {
  return used_memory_for_data_;
}

void JitCodeCache::ClearData(Thread* self, void* data) {
  MutexLock mu(self, lock_);
  FreeData(reinterpret_cast<uint8_t*>(data));
}

uint8_t* JitCodeCache::ReserveData(Thread* self, size_t size) {
  size = RoundUp(size, sizeof(void*));
  uint8_t* result = nullptr;

  {
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    result = AllocateData(size);
  }

  if (result == nullptr) {
    // Retry.
    GarbageCollectCache(self);
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    result = AllocateData(size);
  }

  return result;
}

uint8_t* JitCodeCache::AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end) {
  uint8_t* result = ReserveData(self, end - begin);
  if (result == nullptr) {
    return nullptr;  // Out of space in the data cache.
  }
  std::copy(begin, end, result);
  return result;
}

class MarkCodeVisitor FINAL : public StackVisitor {
 public:
  MarkCodeVisitor(Thread* thread_in, JitCodeCache* code_cache_in)
      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
        code_cache_(code_cache_in),
        bitmap_(code_cache_->GetLiveBitmap()) {}

  bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
    if (method_header == nullptr) {
      return true;
    }
    const void* code = method_header->GetCode();
    if (code_cache_->ContainsPc(code)) {
      // Use the atomic set version, as multiple threads are executing this code.
      bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
    }
    return true;
  }

 private:
  JitCodeCache* const code_cache_;
  CodeCacheBitmap* const bitmap_;
};

class MarkCodeClosure FINAL : public Closure {
 public:
  MarkCodeClosure(JitCodeCache* code_cache, Barrier* barrier)
      : code_cache_(code_cache), barrier_(barrier) {}

  void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    ScopedTrace trace(__PRETTY_FUNCTION__);
    DCHECK(thread == Thread::Current() || thread->IsSuspended());
    MarkCodeVisitor visitor(thread, code_cache_);
    visitor.WalkStack();
    if (kIsDebugBuild) {
      // The stack walking code queries the side instrumentation stack if it
      // sees an instrumentation exit pc, so the JIT code of methods in that stack
      // must have been seen. We sanity check this below.
      for (const instrumentation::InstrumentationStackFrame& frame
          : *thread->GetInstrumentationStack()) {
        // The 'method_' in InstrumentationStackFrame is the one that has return_pc_ in
        // its stack frame; it is not the method owning return_pc_. We just pass null to
        // LookupMethodHeader: the method is only checked against in debug builds.
        OatQuickMethodHeader* method_header =
            code_cache_->LookupMethodHeader(frame.return_pc_, nullptr);
        if (method_header != nullptr) {
          const void* code = method_header->GetCode();
          CHECK(code_cache_->GetLiveBitmap()->Test(FromCodeToAllocation(code)));
        }
      }
    }
    barrier_->Pass(Thread::Current());
  }

 private:
  JitCodeCache* const code_cache_;
  Barrier* const barrier_;
};

void JitCodeCache::NotifyCollectionDone(Thread* self) {
  collection_in_progress_ = false;
  lock_cond_.Broadcast(self);
}

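// Splits the new footprint evenly between the code and data mspaces.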
void JitCodeCache::SetFootprintLimit(size_t new_footprint) {
  size_t per_space_footprint = new_footprint / 2;
  DCHECK(IsAlignedParam(per_space_footprint, kPageSize));
  DCHECK_EQ(per_space_footprint * 2, new_footprint);
  mspace_set_footprint_limit(data_mspace_, per_space_footprint);
  {
    ScopedCodeCacheWrite scc(code_map_.get());
    mspace_set_footprint_limit(code_mspace_, per_space_footprint);
  }
}

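// Returns true if the capacity could be increased, false if the cache is already at
// max_capacity_.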
bool JitCodeCache::IncreaseCodeCacheCapacity() {
  if (current_capacity_ == max_capacity_) {
    return false;
  }

  // Double the capacity if we're below 1MB, or increase it by 1MB if
  // we're above.
  if (current_capacity_ < 1 * MB) {
    current_capacity_ *= 2;
  } else {
    current_capacity_ += 1 * MB;
  }
  if (current_capacity_ > max_capacity_) {
    current_capacity_ = max_capacity_;
  }

  if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
    LOG(INFO) << "Increasing code cache capacity to " << PrettySize(current_capacity_);
  }

  SetFootprintLimit(current_capacity_);

  return true;
}

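// Runs a checkpoint on all threads so that each one marks, in the live bitmap, the JIT
// code it is currently executing.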
void JitCodeCache::MarkCompiledCodeOnThreadStacks(Thread* self) {
  Barrier barrier(0);
  size_t threads_running_checkpoint = 0;
  MarkCodeClosure closure(this, &barrier);
  threads_running_checkpoint = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
  // Now that we have run our checkpoint, move to a suspended state and wait
  // for other threads to run the checkpoint.
  ScopedThreadSuspension sts(self, kSuspended);
  if (threads_running_checkpoint != 0) {
    barrier.Increment(self, threads_running_checkpoint);
  }
}

bool JitCodeCache::ShouldDoFullCollection() {
  if (current_capacity_ == max_capacity_) {
    // Always do a full collection when the code cache is full.
    return true;
  } else if (current_capacity_ < kReservedCapacity) {
    // Always do partial collection when the code cache size is below the reserved
    // capacity.
    return false;
  } else if (last_collection_increased_code_cache_) {
    // This time do a full collection.
    return true;
  } else {
    // This time do a partial collection.
    return false;
  }
}

void JitCodeCache::GarbageCollectCache(Thread* self) {
  ScopedTrace trace(__FUNCTION__);
  if (!garbage_collect_code_) {
    MutexLock mu(self, lock_);
    IncreaseCodeCacheCapacity();
    return;
  }

  // Wait for an existing collection, or let everyone know we are starting one.
  {
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    if (WaitForPotentialCollectionToComplete(self)) {
      return;
    } else {
      number_of_collections_++;
      live_bitmap_.reset(CodeCacheBitmap::Create(
          "code-cache-bitmap",
          reinterpret_cast<uintptr_t>(code_map_->Begin()),
          reinterpret_cast<uintptr_t>(code_map_->Begin() + current_capacity_ / 2)));
      collection_in_progress_ = true;
    }
  }

  TimingLogger logger("JIT code cache timing logger", true, VLOG_IS_ON(jit));
  {
    TimingLogger::ScopedTiming st("Code cache collection", &logger);

    bool do_full_collection = false;
    {
      MutexLock mu(self, lock_);
      do_full_collection = ShouldDoFullCollection();
    }

    if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
      LOG(INFO) << "Do "
                << (do_full_collection ? "full" : "partial")
                << " code cache collection, code="
                << PrettySize(CodeCacheSize())
                << ", data=" << PrettySize(DataCacheSize());
    }

    DoCollection(self, /* collect_profiling_info */ do_full_collection);

    if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
      LOG(INFO) << "After code cache collection, code="
                << PrettySize(CodeCacheSize())
                << ", data=" << PrettySize(DataCacheSize());
    }

    {
      MutexLock mu(self, lock_);

      // Increase the code cache only when we do partial collections.
      // TODO: base this strategy on how full the code cache is?
      if (do_full_collection) {
        last_collection_increased_code_cache_ = false;
      } else {
        last_collection_increased_code_cache_ = true;
        IncreaseCodeCacheCapacity();
      }

      bool next_collection_will_be_full = ShouldDoFullCollection();

      // Start polling the liveness of compiled code to prepare for the next full collection.
      // We avoid doing this if exit stubs are installed to not mess with the instrumentation.
      // TODO(ngeoffray): Clean up instrumentation and code cache interactions.
      if (!Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled() &&
          next_collection_will_be_full) {
        // Save the entry point of methods we have compiled, and update the entry
        // point of those methods to the interpreter. If the method is invoked, the
        // interpreter will update its entry point to the compiled code and call it.
        for (ProfilingInfo* info : profiling_infos_) {
          const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
          if (ContainsPc(entry_point)) {
            info->SetSavedEntryPoint(entry_point);
            info->GetMethod()->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
          }
        }

        DCHECK(CheckLiveCompiledCodeHasProfilingInfo());
      }
      live_bitmap_.reset(nullptr);
      NotifyCollectionDone(self);
    }
  }
  Runtime::Current()->GetJit()->AddTimingLogger(logger);
}

void JitCodeCache::RemoveUnmarkedCode(Thread* self) {
  ScopedTrace trace(__FUNCTION__);
  MutexLock mu(self, lock_);
  ScopedCodeCacheWrite scc(code_map_.get());
  // Iterate over all compiled code and remove entries that are not marked.
  for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
    const void* code_ptr = it->first;
    ArtMethod* method = it->second;
    uintptr_t allocation = FromCodeToAllocation(code_ptr);
    if (GetLiveBitmap()->Test(allocation)) {
      ++it;
    } else {
      const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
      if (method_header->GetEntryPoint() == GetQuickToInterpreterBridge()) {
        method->ClearCounter();
      }
      FreeCode(code_ptr, method);
      it = method_code_map_.erase(it);
    }
  }
}

void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) {
  ScopedTrace trace(__FUNCTION__);
  {
    MutexLock mu(self, lock_);
    if (collect_profiling_info) {
      // Clear the profiling info of methods that do not have compiled code as entrypoint.
      // Also remove the saved entry point from the ProfilingInfo objects.
      for (ProfilingInfo* info : profiling_infos_) {
        const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
        if (!ContainsPc(ptr) && !info->IsMethodBeingCompiled()) {
          info->GetMethod()->SetProfilingInfo(nullptr);
        }
        info->SetSavedEntryPoint(nullptr);
      }
    } else if (kIsDebugBuild) {
      // Sanity check that the profiling infos do not have a dangling entry point.
      for (ProfilingInfo* info : profiling_infos_) {
        DCHECK(info->GetSavedEntryPoint() == nullptr);
      }
    }

    // Mark compiled code that is the entry point of an ArtMethod. Compiled code that is not
    // an entry point is either:
    // - OSR compiled code, which will be removed if not on a thread call stack.
    // - discarded compiled code, which will be removed if not on a thread call stack.
    for (const auto& it : method_code_map_) {
      ArtMethod* method = it.second;
      const void* code_ptr = it.first;
      const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
      if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
        GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
      }
    }

    // Empty osr method map, as osr compiled code will be deleted (except the ones
    // on thread stacks).
    osr_code_map_.clear();
  }

  // Run a checkpoint on all threads to mark the JIT compiled code they are running.
  MarkCompiledCodeOnThreadStacks(self);

  // At this point, mutator threads are still running, and entrypoints of methods can
  // change. We do know they cannot change to a code cache entry that is not marked,
  // therefore we can safely remove those entries.
  RemoveUnmarkedCode(self);

  if (collect_profiling_info) {
    MutexLock mu(self, lock_);
    // Free the profiling info of any method that is neither compiled nor being compiled.
    auto profiling_kept_end = std::remove_if(profiling_infos_.begin(), profiling_infos_.end(),
        [this] (ProfilingInfo* info) NO_THREAD_SAFETY_ANALYSIS {
          const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
          // We have previously cleared the ProfilingInfo pointer in the ArtMethod in the hope
          // that the compiled code would not get revived. As mutator threads run concurrently,
          // they may have revived the compiled code, and now we are in the situation where
          // a method has compiled code but no ProfilingInfo.
          // We make sure compiled methods have a ProfilingInfo object. It is needed for
          // code cache collection.
          if (ContainsPc(ptr) && info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr) {
            // We clear the inline caches, as classes in them might be stale.
            info->ClearInlineCaches();
            // Do a fence to make sure the clearing is seen before attaching to the method.
            QuasiAtomic::ThreadFenceRelease();
            info->GetMethod()->SetProfilingInfo(info);
          } else if (info->GetMethod()->GetProfilingInfo(sizeof(void*)) != info) {
            // No need for this ProfilingInfo object anymore.
            FreeData(reinterpret_cast<uint8_t*>(info));
            return true;
          }
          return false;
        });
    profiling_infos_.erase(profiling_kept_end, profiling_infos_.end());
    DCHECK(CheckLiveCompiledCodeHasProfilingInfo());
  }
}

bool JitCodeCache::CheckLiveCompiledCodeHasProfilingInfo() {
  ScopedTrace trace(__FUNCTION__);
  // Check that methods we have compiled do have a ProfilingInfo object. We would
  // have memory leaks of compiled code otherwise.
  for (const auto& it : method_code_map_) {
    ArtMethod* method = it.second;
    if (method->GetProfilingInfo(sizeof(void*)) == nullptr) {
      const void* code_ptr = it.first;
      const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
      if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
        // If the code is not dead, then we have a problem. Note that this can even
        // happen just after a collection, as mutator threads are running in parallel
        // and could deoptimize existing compiled code.
        return false;
      }
    }
  }
  return true;
}

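// Finds the JIT-compiled method whose code contains `pc`, if any. Entries in
// method_code_map_ are keyed by code start address, so the code walks back one entry
// from lower_bound(pc) and checks whether that method's code range contains pc.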
OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
  static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
  if (kRuntimeISA == kArm) {
    // On Thumb-2, the pc is offset by one.
    --pc;
  }
  if (!ContainsPc(reinterpret_cast<const void*>(pc))) {
    return nullptr;
  }

  MutexLock mu(Thread::Current(), lock_);
  if (method_code_map_.empty()) {
    return nullptr;
  }
  auto it = method_code_map_.lower_bound(reinterpret_cast<const void*>(pc));
  --it;

  const void* code_ptr = it->first;
  OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
  if (!method_header->Contains(pc)) {
    return nullptr;
  }
  if (kIsDebugBuild && method != nullptr) {
    DCHECK_EQ(it->second, method)
        << PrettyMethod(method) << " " << PrettyMethod(it->second) << " " << std::hex << pc;
  }
  return method_header;
}

OatQuickMethodHeader* JitCodeCache::LookupOsrMethodHeader(ArtMethod* method) {
  MutexLock mu(Thread::Current(), lock_);
  auto it = osr_code_map_.find(method);
  if (it == osr_code_map_.end()) {
    return nullptr;
  }
  return OatQuickMethodHeader::FromCodePointer(it->second);
}

ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
                                              ArtMethod* method,
                                              const std::vector<uint32_t>& entries,
                                              bool retry_allocation)
    // No thread safety analysis as we are using TryLock/Unlock explicitly.
    NO_THREAD_SAFETY_ANALYSIS {
  ProfilingInfo* info = nullptr;
  if (!retry_allocation) {
    // If we are allocating for the interpreter, just try to lock, to avoid
    // lock contention with the JIT.
    if (lock_.ExclusiveTryLock(self)) {
      info = AddProfilingInfoInternal(self, method, entries);
      lock_.ExclusiveUnlock(self);
    }
  } else {
    {
      MutexLock mu(self, lock_);
      info = AddProfilingInfoInternal(self, method, entries);
    }

    if (info == nullptr) {
      GarbageCollectCache(self);
      MutexLock mu(self, lock_);
      info = AddProfilingInfoInternal(self, method, entries);
    }
  }
  return info;
}

ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self ATTRIBUTE_UNUSED,
                                                      ArtMethod* method,
                                                      const std::vector<uint32_t>& entries) {
  size_t profile_info_size = RoundUp(
      sizeof(ProfilingInfo) + sizeof(InlineCache) * entries.size(),
      sizeof(void*));

  // Check whether some other thread has concurrently created it.
  ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
  if (info != nullptr) {
    return info;
  }

  uint8_t* data = AllocateData(profile_info_size);
  if (data == nullptr) {
    return nullptr;
  }
  info = new (data) ProfilingInfo(method, entries);

  // Make sure other threads see the data in the profiling info object before the
  // store in the ArtMethod's ProfilingInfo pointer.
  QuasiAtomic::ThreadFenceRelease();

  method->SetProfilingInfo(info);
  profiling_infos_.push_back(info);
  return info;
}

// NO_THREAD_SAFETY_ANALYSIS as this is called from mspace code, at which point the lock
// is already held.
void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) NO_THREAD_SAFETY_ANALYSIS {
  if (code_mspace_ == mspace) {
    size_t result = code_end_;
    code_end_ += increment;
    return reinterpret_cast<void*>(result + code_map_->Begin());
  } else {
    DCHECK_EQ(data_mspace_, mspace);
    size_t result = data_end_;
    data_end_ += increment;
    return reinterpret_cast<void*>(result + data_map_->Begin());
  }
}

void JitCodeCache::GetCompiledArtMethods(const std::set<std::string>& dex_base_locations,
                                         std::vector<ArtMethod*>& methods) {
  ScopedTrace trace(__FUNCTION__);
  MutexLock mu(Thread::Current(), lock_);
  for (auto it : method_code_map_) {
    if (ContainsElement(dex_base_locations, it.second->GetDexFile()->GetBaseLocation())) {
      methods.push_back(it.second);
    }
  }
}

uint64_t JitCodeCache::GetLastUpdateTimeNs() const {
  return last_update_time_ns_.LoadAcquire();
}

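// A rough sketch of the sequence a caller (e.g. the JIT compiler driver, which lives
// outside this file) is expected to follow; illustrative only, the ellipsis stands in
// for the actual compilation step:
//
//   if (code_cache->NotifyCompilationOf(method, self, /* osr */ false)) {
//     // ... run the compiler, producing code/code_size, tables and frame info ...
//     code_cache->CommitCode(self, method, mapping_table, vmap_table, gc_map,
//                            frame_size_in_bytes, core_spill_mask, fp_spill_mask,
//                            code, code_size, /* osr */ false);
//     code_cache->DoneCompiling(method, self);
//   }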
bool JitCodeCache::NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr) {
  if (!osr && ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
    VLOG(jit) << PrettyMethod(method) << " is already compiled";
    return false;
  }

  MutexLock mu(self, lock_);
  if (osr && (osr_code_map_.find(method) != osr_code_map_.end())) {
    VLOG(jit) << PrettyMethod(method) << " is already osr compiled";
    return false;
  }

  ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
  if (info == nullptr) {
    VLOG(jit) << PrettyMethod(method) << " needs a ProfilingInfo to be compiled";
    return false;
  }

  if (info->IsMethodBeingCompiled()) {
    VLOG(jit) << PrettyMethod(method) << " is already being compiled";
    return false;
  }

  info->SetIsMethodBeingCompiled(true);
  return true;
}

void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self ATTRIBUTE_UNUSED) {
  ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
  DCHECK(info->IsMethodBeingCompiled());
  info->SetIsMethodBeingCompiled(false);
}

size_t JitCodeCache::GetMemorySizeOfCodePointer(const void* ptr) {
  MutexLock mu(Thread::Current(), lock_);
  return mspace_usable_size(reinterpret_cast<const void*>(FromCodeToAllocation(ptr)));
}

void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
                                             const OatQuickMethodHeader* header) {
  ProfilingInfo* profiling_info = method->GetProfilingInfo(sizeof(void*));
  if ((profiling_info != nullptr) &&
      (profiling_info->GetSavedEntryPoint() == header->GetEntryPoint())) {
    // Prevent future uses of the compiled code.
    profiling_info->SetSavedEntryPoint(nullptr);
  }

  if (method->GetEntryPointFromQuickCompiledCode() == header->GetEntryPoint()) {
    // The entrypoint is the one to invalidate, so we just update
    // it to the interpreter entry point and clear the counter to get the method
    // Jitted again.
    Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
        method, GetQuickToInterpreterBridge());
    method->ClearCounter();
  } else {
    MutexLock mu(Thread::Current(), lock_);
    auto it = osr_code_map_.find(method);
    if (it != osr_code_map_.end() && OatQuickMethodHeader::FromCodePointer(it->second) == header) {
      // Remove the OSR method, to avoid using it again.
      osr_code_map_.erase(it);
    }
  }
  MutexLock mu(Thread::Current(), lock_);
  number_of_deoptimizations_++;
}

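// Code allocations go through mspace_memalign so that the OatQuickMethodHeader placed
// at the start of the allocation leaves the code itself at the required instruction
// alignment. Usable sizes are tracked for the accounting reported in Dump().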
uint8_t* JitCodeCache::AllocateCode(size_t code_size) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  uint8_t* result = reinterpret_cast<uint8_t*>(
      mspace_memalign(code_mspace_, alignment, code_size));
  size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
  // Ensure the header ends up at expected instruction alignment.
  DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(result + header_size), alignment);
  used_memory_for_code_ += mspace_usable_size(result);
  return result;
}

void JitCodeCache::FreeCode(uint8_t* code) {
  used_memory_for_code_ -= mspace_usable_size(code);
  mspace_free(code_mspace_, code);
}

uint8_t* JitCodeCache::AllocateData(size_t data_size) {
  void* result = mspace_malloc(data_mspace_, data_size);
  used_memory_for_data_ += mspace_usable_size(result);
  return reinterpret_cast<uint8_t*>(result);
}

void JitCodeCache::FreeData(uint8_t* data) {
  used_memory_for_data_ -= mspace_usable_size(data);
  mspace_free(data_mspace_, data);
}

void JitCodeCache::Dump(std::ostream& os) {
  MutexLock mu(Thread::Current(), lock_);
  os << "Current JIT code cache size: " << PrettySize(used_memory_for_code_) << "\n"
     << "Current JIT data cache size: " << PrettySize(used_memory_for_data_) << "\n"
     << "Current JIT capacity: " << PrettySize(current_capacity_) << "\n"
     << "Current number of JIT code cache entries: " << method_code_map_.size() << "\n"
     << "Total number of JIT compilations: " << number_of_compilations_ << "\n"
     << "Total number of JIT compilations for on stack replacement: "
        << number_of_osr_compilations_ << "\n"
     << "Total number of deoptimizations: " << number_of_deoptimizations_ << "\n"
     << "Total number of JIT code cache collections: " << number_of_collections_ << std::endl;
}

}  // namespace jit
}  // namespace art