/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_IMAGE_WRITER_H_
#define ART_COMPILER_IMAGE_WRITER_H_

#include <stdint.h>
#include "base/memory_tool.h"

#include <cstddef>
#include <memory>
#include <set>
#include <stack>
#include <string>
#include <ostream>

#include "art_method.h"
#include "base/bit_utils.h"
#include "base/dchecked_vector.h"
#include "base/enums.h"
#include "base/length_prefixed_array.h"
#include "base/macros.h"
#include "driver/compiler_driver.h"
#include "gc/space/space.h"
#include "image.h"
#include "lock_word.h"
#include "mem_map.h"
#include "oat_file.h"
#include "mirror/dex_cache.h"
#include "os.h"
#include "safe_map.h"
#include "utils.h"

namespace art {
namespace gc {
namespace space {
class ImageSpace;
}  // namespace space
}  // namespace gc

class ClassLoaderVisitor;
class ClassTable;

static constexpr int kInvalidFd = -1;

// Write a Space built during compilation for use during execution.
class ImageWriter FINAL {
 public:
  ImageWriter(const CompilerDriver& compiler_driver,
              uintptr_t image_begin,
              bool compile_pic,
              bool compile_app_image,
              ImageHeader::StorageMode image_storage_mode,
              const std::vector<const char*>& oat_filenames,
              const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map);

  bool PrepareImageAddressSpace();

  bool IsImageAddressSpaceReady() const {
    DCHECK(!image_infos_.empty());
    for (const ImageInfo& image_info : image_infos_) {
      if (image_info.image_roots_address_ == 0u) {
        return false;
      }
    }
    return true;
  }

  template <typename T>
  T* GetImageAddress(T* object) const REQUIRES_SHARED(Locks::mutator_lock_) {
    if (object == nullptr || IsInBootImage(object)) {
      return object;
    } else {
      size_t oat_index = GetOatIndex(object);
      const ImageInfo& image_info = GetImageInfo(oat_index);
      return reinterpret_cast<T*>(image_info.image_begin_ + GetImageOffset(object));
    }
  }

  ArtMethod* GetImageMethodAddress(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  template <typename PtrType>
  PtrType GetDexCacheArrayElementImageAddress(const DexFile* dex_file, uint32_t offset)
      const REQUIRES_SHARED(Locks::mutator_lock_) {
    auto oat_it = dex_file_oat_index_map_.find(dex_file);
    DCHECK(oat_it != dex_file_oat_index_map_.end());
    const ImageInfo& image_info = GetImageInfo(oat_it->second);
    auto it = image_info.dex_cache_array_starts_.find(dex_file);
    DCHECK(it != image_info.dex_cache_array_starts_.end());
    return reinterpret_cast<PtrType>(
        image_info.image_begin_ + image_info.bin_slot_offsets_[kBinDexCacheArray] +
        it->second + offset);
  }

  size_t GetOatFileOffset(size_t oat_index) const {
    return GetImageInfo(oat_index).oat_offset_;
  }

  const uint8_t* GetOatFileBegin(size_t oat_index) const {
    return GetImageInfo(oat_index).oat_file_begin_;
  }

  // If image_fd is not kInvalidFd, then we use that for the image file. Otherwise we open
  // the names in image_filenames.
  // If oat_fd is not kInvalidFd, then we use that for the oat file. Otherwise we open
  // the names in oat_filenames.
  bool Write(int image_fd,
             const std::vector<const char*>& image_filenames,
             const std::vector<const char*>& oat_filenames)
      REQUIRES(!Locks::mutator_lock_);

  uintptr_t GetOatDataBegin(size_t oat_index) {
    return reinterpret_cast<uintptr_t>(GetImageInfo(oat_index).oat_data_begin_);
  }

  // Get the index of the oat file containing the dex file.
  //
  // This "oat_index" is used to retrieve information about the memory layout
  // of the oat file and its associated image file, needed for link-time patching
  // of references to the image or across oat files.
  size_t GetOatIndexForDexFile(const DexFile* dex_file) const;

  // Get the index of the oat file containing the dex file served by the dex cache.
  size_t GetOatIndexForDexCache(ObjPtr<mirror::DexCache> dex_cache) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Update the oat layout for the given oat file.
  // This will make the oat_offset for the next oat file valid.
  void UpdateOatFileLayout(size_t oat_index,
                           size_t oat_loaded_size,
                           size_t oat_data_offset,
                           size_t oat_data_size);
  // Update information about the oat header, i.e. checksum and trampoline offsets.
  void UpdateOatFileHeader(size_t oat_index, const OatHeader& oat_header);

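  // A rough sketch of how a caller might drive the public API above, inferred from the method
  // comments (hypothetical driver code with made-up variable names, not the actual dex2oat
  // sequence):
  //
  //   ImageWriter writer(driver, image_base, compile_pic, compile_app_image,
  //                      ImageHeader::kStorageModeUncompressed, oat_filenames,
  //                      dex_file_oat_index_map);
  //   CHECK(writer.PrepareImageAddressSpace());
  //   for (size_t i = 0; i < oat_filenames.size(); ++i) {
  //     // ... write oat file i, then feed its layout and header back in:
  //     writer.UpdateOatFileLayout(i, oat_loaded_size, oat_data_offset, oat_data_size);
  //     writer.UpdateOatFileHeader(i, oat_header);
  //   }
  //   CHECK(writer.Write(kInvalidFd, image_filenames, oat_filenames));
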
 private:
  using WorkStack = std::stack<std::pair<mirror::Object*, size_t>>;

  bool AllocMemory();

  // Mark the objects defined in this space in the given live bitmap.
  void RecordImageAllocations() REQUIRES_SHARED(Locks::mutator_lock_);

  // Classify different kinds of bins that objects end up getting packed into during image writing.
  // Ordered from dirtiest to cleanest (until ArtMethods).
  enum Bin {
    kBinMiscDirty,                     // Dex caches, object locks, etc...
    kBinClassVerified,                 // Class verified, but initializers haven't been run.
    // Unknown mix of clean/dirty:
    kBinRegular,
    kBinClassInitialized,              // Class initializers have been run.
    // All classes get their own bins since their fields are often dirty.
    kBinClassInitializedFinalStatics,  // Class initializers have been run, no non-final statics.
    // Likely-clean:
    kBinString,                        // [String] Almost always immutable (except for obj header).
    // Add more bins here if we add more segregation code.
    // Non-mirror fields must be below.
    // ArtFields should be always clean.
    kBinArtField,
    // If the class is initialized, then the ArtMethods are probably clean.
    kBinArtMethodClean,
    // ArtMethods may be dirty if the class has native methods or a declaring class that isn't
    // initialized.
    kBinArtMethodDirty,
    // IMT (clean).
    kBinImTable,
    // Conflict tables (clean).
    kBinIMTConflictTable,
    // Runtime methods (always clean, do not have a length prefix array).
    kBinRuntimeMethod,
    // Dex cache arrays have a special slot for PC-relative addressing. Since they are
    // huge, and as such their dirtiness is not important for the clean/dirty separation,
    // we arbitrarily keep them at the end of the native data.
    kBinDexCacheArray,                 // Arrays belonging to dex cache.
    kBinSize,
    // Number of bins which are for mirror objects.
    kBinMirrorCount = kBinArtField,
  };
  friend std::ostream& operator<<(std::ostream& stream, const Bin& bin);

  enum NativeObjectRelocationType {
    kNativeObjectRelocationTypeArtField,
    kNativeObjectRelocationTypeArtFieldArray,
    kNativeObjectRelocationTypeArtMethodClean,
    kNativeObjectRelocationTypeArtMethodArrayClean,
    kNativeObjectRelocationTypeArtMethodDirty,
    kNativeObjectRelocationTypeArtMethodArrayDirty,
    kNativeObjectRelocationTypeRuntimeMethod,
    kNativeObjectRelocationTypeIMTable,
    kNativeObjectRelocationTypeIMTConflictTable,
    kNativeObjectRelocationTypeDexCacheArray,
  };
  friend std::ostream& operator<<(std::ostream& stream, const NativeObjectRelocationType& type);

  enum OatAddress {
    kOatAddressInterpreterToInterpreterBridge,
    kOatAddressInterpreterToCompiledCodeBridge,
    kOatAddressJNIDlsymLookup,
    kOatAddressQuickGenericJNITrampoline,
    kOatAddressQuickIMTConflictTrampoline,
    kOatAddressQuickResolutionTrampoline,
    kOatAddressQuickToInterpreterBridge,
    // Number of elements in the enum.
    kOatAddressCount,
  };
  friend std::ostream& operator<<(std::ostream& stream, const OatAddress& oat_address);

  static constexpr size_t kBinBits = MinimumBitsToStore<uint32_t>(kBinMirrorCount - 1);
  // uint32 = typeof(lockword_)
  // Subtract read barrier bits since we want these to remain 0, or else it may result in DCHECK
  // failures due to invalid read barrier bits during object field reads.
  static const size_t kBinShift = BitSizeOf<uint32_t>() - kBinBits - LockWord::kGCStateSize;
  // 111000.....0
  static const size_t kBinMask = ((static_cast<size_t>(1) << kBinBits) - 1) << kBinShift;

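  // Worked example (illustrative only; assumes the six mirror bins listed above and
  // LockWord::kGCStateSize == 2): kBinBits is 3, kBinShift is 32 - 3 - 2 = 27 and kBinMask is
  // 0x38000000. A BinSlot for (kBinString, index 0x40) would then pack as
  // (5u << 27) | 0x40 == 0x28000040, leaving the top (GC state) bits zero.
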
  // We use the lock word to store the bin # and bin index of the object in the image.
  //
  // The struct size must be exactly sizeof(LockWord), currently 32-bits, since this will end up
  // stored in the lock word bit-for-bit when object forwarding addresses are being calculated.
  struct BinSlot {
    explicit BinSlot(uint32_t lockword);
    BinSlot(Bin bin, uint32_t index);

    // The bin an object belongs to, i.e. regular, class/verified, class/initialized, etc.
    Bin GetBin() const;
    // The offset in bytes from the beginning of the bin. Aligned to object size.
    uint32_t GetIndex() const;
    // Pack into a single uint32_t, for storing into a lock word.
    uint32_t Uint32Value() const { return lockword_; }
    // Comparison operator for map support.
    bool operator<(const BinSlot& other) const { return lockword_ < other.lockword_; }

   private:
    // Must be the same size as LockWord, any larger and we would truncate the data.
    const uint32_t lockword_;
  };

  struct ImageInfo {
    ImageInfo();
    ImageInfo(ImageInfo&&) = default;

    // Create the image sections into the out sections variable, returns the size of the image
    // excluding the bitmap.
    size_t CreateImageSections(ImageSection* out_sections) const;

    std::unique_ptr<MemMap> image_;  // Memory mapped for generating the image.

    // Target begin of this image. Note: It is not valid to write here, this is the address
    // of the target image, not necessarily where image_ is mapped. The address is only valid
    // after the layout has been computed (otherwise null).
    uint8_t* image_begin_ = nullptr;

    // Offset to the free space in image_, initially size of image header.
    size_t image_end_ = RoundUp(sizeof(ImageHeader), kObjectAlignment);
    uint32_t image_roots_address_ = 0;  // The image roots address in the image.
    size_t image_offset_ = 0;  // Offset of this image from the start of the first image.

    // Image size is the *address space* covered by this image. As the live bitmap is aligned
    // to the page size, the live bitmap will cover more address space than necessary. But live
    // bitmaps may not overlap, so an image has a "shadow," which is accounted for in the size.
    // The next image may only start at image_begin_ + image_size_ (which is guaranteed to be
    // page-aligned).
    size_t image_size_ = 0;

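    // Illustrative example (made-up numbers): if image_begin_ is 0x70000000 and the objects,
    // metadata and bitmap shadow add up to 0x140000 bytes, then image_size_ is 0x140000 and
    // the next image may be laid out no lower than 0x70140000.
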
    // Oat data.
    // Offset of the oat file for this image from start of oat files. This is
    // valid when the previous oat file has been written.
    size_t oat_offset_ = 0;
    // Layout of the loaded ELF file containing the oat file, valid after UpdateOatFileLayout().
    const uint8_t* oat_file_begin_ = nullptr;
    size_t oat_loaded_size_ = 0;
    const uint8_t* oat_data_begin_ = nullptr;
    size_t oat_size_ = 0;  // Size of the corresponding oat data.
    // The oat header checksum, valid after UpdateOatFileHeader().
    uint32_t oat_checksum_ = 0u;

    // Image bitmap which lets us know where the objects inside of the image reside.
    std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> image_bitmap_;

    // The start offsets of the dex cache arrays.
    SafeMap<const DexFile*, size_t> dex_cache_array_starts_;

    // Offset from oat_data_begin_ to the stubs.
    uint32_t oat_address_offsets_[kOatAddressCount] = {};

    // Bin slot tracking for dirty object packing.
    size_t bin_slot_sizes_[kBinSize] = {};  // Number of bytes in a bin.
    size_t bin_slot_offsets_[kBinSize] = {};  // Number of bytes in previous bins.
    size_t bin_slot_count_[kBinSize] = {};  // Number of objects in a bin.

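    // For example (a sketch of the intended invariant, not checked here): once the layout is
    // done, bin_slot_offsets_[i] equals the sum of bin_slot_sizes_[0..i), so
    // bin_slot_offsets_[kBinClassVerified] == bin_slot_sizes_[kBinMiscDirty].
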
    // Cached size of the intern table for when we allocate memory.
    size_t intern_table_bytes_ = 0;

    // Number of image class table bytes.
    size_t class_table_bytes_ = 0;

    // Intern table associated with this image for serialization.
    std::unique_ptr<InternTable> intern_table_;

    // Class table associated with this image for serialization.
    std::unique_ptr<ClassTable> class_table_;
  };

  // We use the lock word to store the offset of the object in the image.
  void AssignImageOffset(mirror::Object* object, BinSlot bin_slot)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SetImageOffset(mirror::Object* object, size_t offset)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsImageOffsetAssigned(mirror::Object* object) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  size_t GetImageOffset(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);
  void UpdateImageOffset(mirror::Object* obj, uintptr_t offset)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void PrepareDexCacheArraySlots() REQUIRES_SHARED(Locks::mutator_lock_);
  void AssignImageBinSlot(mirror::Object* object, size_t oat_index)
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* TryAssignBinSlot(WorkStack& work_stack, mirror::Object* obj, size_t oat_index)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SetImageBinSlot(mirror::Object* object, BinSlot bin_slot)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsImageBinSlotAssigned(mirror::Object* object) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  BinSlot GetImageBinSlot(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);

  void AddDexCacheArrayRelocation(void* array, size_t offset, ObjPtr<mirror::DexCache> dex_cache)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void AddMethodPointerArray(mirror::PointerArray* arr) REQUIRES_SHARED(Locks::mutator_lock_);

  static void* GetImageAddressCallback(void* writer, mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return reinterpret_cast<ImageWriter*>(writer)->GetImageAddress(obj);
  }

  mirror::Object* GetLocalAddress(mirror::Object* object) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    size_t offset = GetImageOffset(object);
    size_t oat_index = GetOatIndex(object);
    const ImageInfo& image_info = GetImageInfo(oat_index);
    uint8_t* dst = image_info.image_->Begin() + offset;
    return reinterpret_cast<mirror::Object*>(dst);
  }

  // Returns the address in the boot image if we are compiling the app image.
  const uint8_t* GetOatAddress(OatAddress type) const;

  const uint8_t* GetOatAddressForOffset(uint32_t offset, const ImageInfo& image_info) const {
    // With Quick, code is within the OatFile, as they are all in one
    // .o ELF object. But interpret it as signed.
    DCHECK_LE(static_cast<int32_t>(offset), static_cast<int32_t>(image_info.oat_size_));
    DCHECK(image_info.oat_data_begin_ != nullptr);
    return offset == 0u ? nullptr : image_info.oat_data_begin_ + static_cast<int32_t>(offset);
  }

  // Returns true if the class was in the original requested image classes list.
  bool KeepClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);

  // Debug aid that lists the requested image classes.
  void DumpImageClasses();

  // Preinitializes some otherwise lazy fields (such as Class name) to avoid runtime image dirtying.
  void ComputeLazyFieldsForImageClasses()
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all class loaders.
  void VisitClassLoaders(ClassLoaderVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

  // Remove unwanted classes from various roots.
  void PruneNonImageClasses() REQUIRES_SHARED(Locks::mutator_lock_);

  // Verify unwanted classes removed.
  void CheckNonImageClassesRemoved() REQUIRES_SHARED(Locks::mutator_lock_);
  static void CheckNonImageClassesRemovedCallback(mirror::Object* obj, void* arg)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Lays out where the image objects will be at runtime.
  void CalculateNewObjectOffsets()
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ProcessWorkStack(WorkStack* work_stack)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void CreateHeader(size_t oat_index)
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::ObjectArray<mirror::Object>* CreateImageRoots(size_t oat_index) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void CalculateObjectBinSlots(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void UnbinObjectsIntoOffset(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static void EnsureBinSlotAssignedCallback(mirror::Object* obj, void* arg)
      REQUIRES_SHARED(Locks::mutator_lock_);
  static void DeflateMonitorCallback(mirror::Object* obj, void* arg)
      REQUIRES_SHARED(Locks::mutator_lock_);
  static void UnbinObjectsIntoOffsetCallback(mirror::Object* obj, void* arg)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Creates the contiguous image in memory and adjusts pointers.
  void CopyAndFixupNativeData(size_t oat_index) REQUIRES_SHARED(Locks::mutator_lock_);
  void CopyAndFixupObjects() REQUIRES_SHARED(Locks::mutator_lock_);
  static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void CopyAndFixupObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
  void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, const ImageInfo& image_info)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void CopyAndFixupImTable(ImTable* orig, ImTable* copy) REQUIRES_SHARED(Locks::mutator_lock_);
  void CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FixupClass(mirror::Class* orig, mirror::Class* copy)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FixupObject(mirror::Object* orig, mirror::Object* copy)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FixupDexCache(mirror::DexCache* orig_dex_cache, mirror::DexCache* copy_dex_cache)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FixupPointerArray(mirror::Object* dst,
                         mirror::PointerArray* arr,
                         mirror::Class* klass,
                         Bin array_type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Get quick code for non-resolution/imt_conflict/abstract method.
  const uint8_t* GetQuickCode(ArtMethod* method,
                              const ImageInfo& image_info,
                              bool* quick_is_interpreted)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Calculate the sum total of the bin slot sizes in [0, up_to). Defaults to all bins.
  size_t GetBinSizeSum(ImageInfo& image_info, Bin up_to = kBinSize) const;

  // Return true if a method is likely to be dirtied at runtime.
  bool WillMethodBeDirty(ArtMethod* m) const REQUIRES_SHARED(Locks::mutator_lock_);

  // Assign the offset for an ArtMethod.
  void AssignMethodOffset(ArtMethod* method,
                          NativeObjectRelocationType type,
                          size_t oat_index)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void TryAssignImTableOffset(ImTable* imt, size_t oat_index) REQUIRES_SHARED(Locks::mutator_lock_);

  // Assign the offset for an IMT conflict table. Does nothing if the table already has a native
  // relocation.
  void TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Return true if klass is loaded by the boot class loader but not in the boot image.
  bool IsBootClassLoaderNonImageClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);

  // Return true if klass depends on a boot class loader non-image class. We want to prune these
  // classes since we do not want any boot class loader classes in the image. This means that
  // we also cannot have any classes which refer to these boot class loader non-image classes.
  // PruneAppImageClass also prunes if klass depends on a non-image class according to the compiler
  // driver.
  bool PruneAppImageClass(mirror::Class* klass)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // early_exit is true if we had a cyclic dependency anywhere down the chain.
  bool PruneAppImageClassInternal(mirror::Class* klass,
                                  bool* early_exit,
                                  std::unordered_set<mirror::Class*>* visited)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool IsMultiImage() const {
    return image_infos_.size() > 1;
  }

  static Bin BinTypeForNativeRelocationType(NativeObjectRelocationType type);

  uintptr_t NativeOffsetInImage(void* obj) REQUIRES_SHARED(Locks::mutator_lock_);

  // Location of where the object will be when the image is loaded at runtime.
  template <typename T>
  T* NativeLocationInImage(T* obj) REQUIRES_SHARED(Locks::mutator_lock_);

  // Location of where the temporary copy of the object currently is.
  template <typename T>
  T* NativeCopyLocation(T* obj, mirror::DexCache* dex_cache) REQUIRES_SHARED(Locks::mutator_lock_);

  // Return true if obj is inside of the boot image space. This may only return true if we are
  // compiling an app image.
  bool IsInBootImage(const void* obj) const;

  // Return true if ptr is within the boot oat file.
  bool IsInBootOatFile(const void* ptr) const;

  // Get the index of the oat file associated with the object.
  size_t GetOatIndex(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);

  // The oat index for shared data in multi-image and all data in single-image compilation.
  size_t GetDefaultOatIndex() const {
    return 0u;
  }

  ImageInfo& GetImageInfo(size_t oat_index) {
    return image_infos_[oat_index];
  }

  const ImageInfo& GetImageInfo(size_t oat_index) const {
    return image_infos_[oat_index];
  }

  // Find an already strongly-interned string in the other images or in the boot image. Used to
  // remove duplicates in the multi-image and app image case.
  mirror::String* FindInternedString(mirror::String* string) REQUIRES_SHARED(Locks::mutator_lock_);

  // Return true if there already exists a native allocation for an object.
  bool NativeRelocationAssigned(void* ptr) const;

  const CompilerDriver& compiler_driver_;

  // Beginning target image address for the first image.
  uint8_t* global_image_begin_;

  // Offset from image_begin_ to where the first object is in image_.
  size_t image_objects_offset_begin_;

  // Pointer arrays that need to be updated. Since these are only some int and long arrays, we need
  // to keep track. These include vtable arrays, iftable arrays, and dex caches.
  std::unordered_map<mirror::PointerArray*, Bin> pointer_arrays_;

  // Saved hash codes. We use these to restore lockwords which were temporarily used to have
  // forwarding addresses as well as copying over hash codes.
  std::unordered_map<mirror::Object*, uint32_t> saved_hashcode_map_;

  // Oat index map for objects.
  std::unordered_map<mirror::Object*, uint32_t> oat_index_map_;

  // Boolean flags.
  const bool compile_pic_;
  const bool compile_app_image_;

  // Size of pointers on the target architecture.
  PointerSize target_ptr_size_;

  // Image data indexed by the oat file index.
  dchecked_vector<ImageInfo> image_infos_;

  // ArtField, ArtMethod relocating map. These are allocated as an array of structs, but we want to
  // have one entry per art field for convenience. ArtFields are placed right after the end of the
  // image objects (aka sum of bin_slot_sizes_). ArtMethods are placed right after the ArtFields.
  struct NativeObjectRelocation {
    size_t oat_index;
    uintptr_t offset;
    NativeObjectRelocationType type;

    bool IsArtMethodRelocation() const {
      return type == kNativeObjectRelocationTypeArtMethodClean ||
          type == kNativeObjectRelocationTypeArtMethodDirty ||
          type == kNativeObjectRelocationTypeRuntimeMethod;
    }
  };
  std::unordered_map<void*, NativeObjectRelocation> native_object_relocations_;

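  // Rough sketch of the native data layout implied by the Bin ordering above (per image, after
  // the mirror objects): [ArtFields][clean ArtMethods][dirty ArtMethods][ImTables]
  // [IMT conflict tables][runtime methods][dex cache arrays]. Illustrative only; the
  // authoritative order is the Bin enum.
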
  // Runtime ArtMethods which aren't reachable from any Class but need to be copied into the image.
  ArtMethod* image_methods_[ImageHeader::kImageMethodsCount];

  // Counters for measurements, used for logging only.
  uint64_t dirty_methods_;
  uint64_t clean_methods_;

  // Prune class memoization table to speed up ContainsBootClassLoaderNonImageClass.
  std::unordered_map<mirror::Class*, bool> prune_class_memo_;

  // Class loaders with a class table to write out. There should only be one class loader because
  // dex2oat loads the dex files to be compiled into a single class loader. For the boot image,
  // null is a valid entry.
  std::unordered_set<mirror::ClassLoader*> class_loaders_;

  // Which mode the image is stored as, see image.h
  const ImageHeader::StorageMode image_storage_mode_;

  // The file names of oat files.
  const std::vector<const char*>& oat_filenames_;

  // Map of dex files to the indexes of oat files that they were compiled into.
  const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map_;

  class ComputeLazyFieldsForClassesVisitor;
  class FixupClassVisitor;
  class FixupRootVisitor;
  class FixupVisitor;
  class GetRootsVisitor;
  class NativeLocationVisitor;
  class PruneClassesVisitor;
  class PruneClassLoaderClassesVisitor;
  class VisitReferencesVisitor;

  DISALLOW_COPY_AND_ASSIGN(ImageWriter);
};

}  // namespace art

#endif  // ART_COMPILER_IMAGE_WRITER_H_