/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_LINKER_LINKER_PATCH_H_
#define ART_COMPILER_LINKER_LINKER_PATCH_H_

#include <iosfwd>
#include <stdint.h>

#include <android-base/logging.h>

#include "base/bit_utils.h"
#include "dex/method_reference.h"

namespace art {

class DexFile;

namespace linker {

class LinkerPatch {
 public:
  // Note: We explicitly specify the underlying type of the enum because GCC
  // would otherwise select a bigger underlying type and then complain that
  //     'art::LinkerPatch::patch_type_' is too small to hold all
  //     values of 'enum class art::LinkerPatch::Type'
  // which is ridiculous given we have only a handful of values here. If we
  // choose to squeeze the Type into fewer than 8 bits, we'll have to declare
  // patch_type_ as an uintN_t and do explicit static_cast<>s.
  enum class Type : uint8_t {
    kMethodRelative,          // NOTE: Actual patching is instruction_set-dependent.
    kMethodBssEntry,          // NOTE: Actual patching is instruction_set-dependent.
    kCall,
    kCallRelative,            // NOTE: Actual patching is instruction_set-dependent.
    kTypeRelative,            // NOTE: Actual patching is instruction_set-dependent.
    kTypeClassTable,          // NOTE: Actual patching is instruction_set-dependent.
    kTypeBssEntry,            // NOTE: Actual patching is instruction_set-dependent.
    kStringRelative,          // NOTE: Actual patching is instruction_set-dependent.
    kStringInternTable,       // NOTE: Actual patching is instruction_set-dependent.
    kStringBssEntry,          // NOTE: Actual patching is instruction_set-dependent.
    kBakerReadBarrierBranch,  // NOTE: Actual patching is instruction_set-dependent.
  };

  static LinkerPatch RelativeMethodPatch(size_t literal_offset,
                                         const DexFile* target_dex_file,
                                         uint32_t pc_insn_offset,
                                         uint32_t target_method_idx) {
    LinkerPatch patch(literal_offset, Type::kMethodRelative, target_dex_file);
    patch.method_idx_ = target_method_idx;
    patch.pc_insn_offset_ = pc_insn_offset;
    return patch;
  }

  static LinkerPatch MethodBssEntryPatch(size_t literal_offset,
                                         const DexFile* target_dex_file,
                                         uint32_t pc_insn_offset,
                                         uint32_t target_method_idx) {
    LinkerPatch patch(literal_offset, Type::kMethodBssEntry, target_dex_file);
    patch.method_idx_ = target_method_idx;
    patch.pc_insn_offset_ = pc_insn_offset;
    return patch;
  }

  static LinkerPatch CodePatch(size_t literal_offset,
                               const DexFile* target_dex_file,
                               uint32_t target_method_idx) {
    LinkerPatch patch(literal_offset, Type::kCall, target_dex_file);
    patch.method_idx_ = target_method_idx;
    return patch;
  }

  static LinkerPatch RelativeCodePatch(size_t literal_offset,
                                       const DexFile* target_dex_file,
                                       uint32_t target_method_idx) {
    LinkerPatch patch(literal_offset, Type::kCallRelative, target_dex_file);
    patch.method_idx_ = target_method_idx;
    return patch;
  }

  static LinkerPatch RelativeTypePatch(size_t literal_offset,
                                       const DexFile* target_dex_file,
                                       uint32_t pc_insn_offset,
                                       uint32_t target_type_idx) {
    LinkerPatch patch(literal_offset, Type::kTypeRelative, target_dex_file);
    patch.type_idx_ = target_type_idx;
    patch.pc_insn_offset_ = pc_insn_offset;
    return patch;
  }

  static LinkerPatch TypeClassTablePatch(size_t literal_offset,
                                         const DexFile* target_dex_file,
                                         uint32_t pc_insn_offset,
                                         uint32_t target_type_idx) {
    LinkerPatch patch(literal_offset, Type::kTypeClassTable, target_dex_file);
    patch.type_idx_ = target_type_idx;
    patch.pc_insn_offset_ = pc_insn_offset;
    return patch;
  }

  static LinkerPatch TypeBssEntryPatch(size_t literal_offset,
                                       const DexFile* target_dex_file,
                                       uint32_t pc_insn_offset,
                                       uint32_t target_type_idx) {
    LinkerPatch patch(literal_offset, Type::kTypeBssEntry, target_dex_file);
    patch.type_idx_ = target_type_idx;
    patch.pc_insn_offset_ = pc_insn_offset;
    return patch;
  }

  static LinkerPatch RelativeStringPatch(size_t literal_offset,
                                         const DexFile* target_dex_file,
                                         uint32_t pc_insn_offset,
                                         uint32_t target_string_idx) {
    LinkerPatch patch(literal_offset, Type::kStringRelative, target_dex_file);
    patch.string_idx_ = target_string_idx;
    patch.pc_insn_offset_ = pc_insn_offset;
    return patch;
  }

  static LinkerPatch StringInternTablePatch(size_t literal_offset,
                                            const DexFile* target_dex_file,
                                            uint32_t pc_insn_offset,
                                            uint32_t target_string_idx) {
    LinkerPatch patch(literal_offset, Type::kStringInternTable, target_dex_file);
    patch.string_idx_ = target_string_idx;
    patch.pc_insn_offset_ = pc_insn_offset;
    return patch;
  }

  static LinkerPatch StringBssEntryPatch(size_t literal_offset,
                                         const DexFile* target_dex_file,
                                         uint32_t pc_insn_offset,
                                         uint32_t target_string_idx) {
    LinkerPatch patch(literal_offset, Type::kStringBssEntry, target_dex_file);
    patch.string_idx_ = target_string_idx;
    patch.pc_insn_offset_ = pc_insn_offset;
    return patch;
  }

  static LinkerPatch BakerReadBarrierBranchPatch(size_t literal_offset,
                                                 uint32_t custom_value1 = 0u,
                                                 uint32_t custom_value2 = 0u) {
    LinkerPatch patch(literal_offset, Type::kBakerReadBarrierBranch, nullptr);
    patch.baker_custom_value1_ = custom_value1;
    patch.baker_custom_value2_ = custom_value2;
    return patch;
  }

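  // Illustrative usage sketch (not part of this header): a code generator that
  // emits a PC-relative string load as two instructions could record a patch
  // for the second instruction roughly as below. Here `linker_patches`,
  // `add_offset`, `adrp_offset`, `dex_file` and `string_index` are hypothetical
  // names standing in for whatever the back-end actually tracks.
  //
  //   linker_patches->push_back(LinkerPatch::RelativeStringPatch(
  //       add_offset, &dex_file, /* pc_insn_offset= */ adrp_offset, string_index));
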
  LinkerPatch(const LinkerPatch& other) = default;
  LinkerPatch& operator=(const LinkerPatch& other) = default;

  size_t LiteralOffset() const {
    return literal_offset_;
  }

  Type GetType() const {
    return patch_type_;
  }

  bool IsPcRelative() const {
    switch (GetType()) {
      case Type::kMethodRelative:
      case Type::kMethodBssEntry:
      case Type::kCallRelative:
      case Type::kTypeRelative:
      case Type::kTypeClassTable:
      case Type::kTypeBssEntry:
      case Type::kStringRelative:
      case Type::kStringInternTable:
      case Type::kStringBssEntry:
      case Type::kBakerReadBarrierBranch:
        return true;
      default:
        return false;
    }
  }

  MethodReference TargetMethod() const {
    DCHECK(patch_type_ == Type::kMethodRelative ||
           patch_type_ == Type::kMethodBssEntry ||
           patch_type_ == Type::kCall ||
           patch_type_ == Type::kCallRelative);
    return MethodReference(target_dex_file_, method_idx_);
  }

  const DexFile* TargetTypeDexFile() const {
    DCHECK(patch_type_ == Type::kTypeRelative ||
           patch_type_ == Type::kTypeClassTable ||
           patch_type_ == Type::kTypeBssEntry);
    return target_dex_file_;
  }

  dex::TypeIndex TargetTypeIndex() const {
    DCHECK(patch_type_ == Type::kTypeRelative ||
           patch_type_ == Type::kTypeClassTable ||
           patch_type_ == Type::kTypeBssEntry);
    return dex::TypeIndex(type_idx_);
  }

  const DexFile* TargetStringDexFile() const {
    DCHECK(patch_type_ == Type::kStringRelative ||
           patch_type_ == Type::kStringInternTable ||
           patch_type_ == Type::kStringBssEntry);
    return target_dex_file_;
  }

  dex::StringIndex TargetStringIndex() const {
    DCHECK(patch_type_ == Type::kStringRelative ||
           patch_type_ == Type::kStringInternTable ||
           patch_type_ == Type::kStringBssEntry);
    return dex::StringIndex(string_idx_);
  }

  uint32_t PcInsnOffset() const {
    DCHECK(patch_type_ == Type::kMethodRelative ||
           patch_type_ == Type::kMethodBssEntry ||
           patch_type_ == Type::kTypeRelative ||
           patch_type_ == Type::kTypeClassTable ||
           patch_type_ == Type::kTypeBssEntry ||
           patch_type_ == Type::kStringRelative ||
           patch_type_ == Type::kStringInternTable ||
           patch_type_ == Type::kStringBssEntry);
    return pc_insn_offset_;
  }

  uint32_t GetBakerCustomValue1() const {
    DCHECK(patch_type_ == Type::kBakerReadBarrierBranch);
    return baker_custom_value1_;
  }

  uint32_t GetBakerCustomValue2() const {
    DCHECK(patch_type_ == Type::kBakerReadBarrierBranch);
    return baker_custom_value2_;
  }

 private:
  LinkerPatch(size_t literal_offset, Type patch_type, const DexFile* target_dex_file)
      : target_dex_file_(target_dex_file),
        literal_offset_(literal_offset),
        patch_type_(patch_type) {
    cmp1_ = 0u;
    cmp2_ = 0u;
    // The compiler rejects methods that are too big, so the compiled code
    // of a single method really shouldn't be anywhere close to 16MiB.
    DCHECK(IsUint<24>(literal_offset));
  }

  const DexFile* target_dex_file_;
  // TODO: Clean up naming. Some patched locations are literals but others are not.
  uint32_t literal_offset_ : 24;  // Method code size up to 16MiB.
  Type patch_type_ : 8;
  union {
    uint32_t cmp1_;          // Used for relational operators.
    uint32_t method_idx_;    // Method index for Call/Method patches.
    uint32_t type_idx_;      // Type index for Type patches.
    uint32_t string_idx_;    // String index for String patches.
    uint32_t baker_custom_value1_;
    static_assert(sizeof(method_idx_) == sizeof(cmp1_), "needed by relational operators");
    static_assert(sizeof(type_idx_) == sizeof(cmp1_), "needed by relational operators");
    static_assert(sizeof(string_idx_) == sizeof(cmp1_), "needed by relational operators");
    static_assert(sizeof(baker_custom_value1_) == sizeof(cmp1_), "needed by relational operators");
  };
  union {
    // Note: To avoid uninitialized padding on 64-bit systems, we use `size_t` for `cmp2_`.
    // This allows a hashing function to treat an array of linker patches as raw memory.
    size_t cmp2_;  // Used for relational operators.
    // Literal offset of the insn loading PC (same as literal_offset if it's the same insn,
    // may be different if the PC-relative addressing needs multiple insns).
    uint32_t pc_insn_offset_;
    uint32_t baker_custom_value2_;
    static_assert(sizeof(pc_insn_offset_) <= sizeof(cmp2_), "needed by relational operators");
    static_assert(sizeof(baker_custom_value2_) <= sizeof(cmp2_), "needed by relational operators");
  };
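
  // Illustrative note (an assumption, not existing code in this header): because
  // the layout above leaves no uninitialized padding, a caller holding a
  // contiguous `std::vector<LinkerPatch> patches` could hash it byte-wise, e.g.
  // by feeding
  //   reinterpret_cast<const uint8_t*>(patches.data()),
  //   patches.size() * sizeof(LinkerPatch)
  // to any byte-oriented hash function.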

  friend bool operator==(const LinkerPatch& lhs, const LinkerPatch& rhs);
  friend bool operator<(const LinkerPatch& lhs, const LinkerPatch& rhs);
};
std::ostream& operator<<(std::ostream& os, const LinkerPatch::Type& type);

inline bool operator==(const LinkerPatch& lhs, const LinkerPatch& rhs) {
  return lhs.literal_offset_ == rhs.literal_offset_ &&
         lhs.patch_type_ == rhs.patch_type_ &&
         lhs.target_dex_file_ == rhs.target_dex_file_ &&
         lhs.cmp1_ == rhs.cmp1_ &&
         lhs.cmp2_ == rhs.cmp2_;
}

inline bool operator<(const LinkerPatch& lhs, const LinkerPatch& rhs) {
  return (lhs.literal_offset_ != rhs.literal_offset_) ? lhs.literal_offset_ < rhs.literal_offset_
      : (lhs.patch_type_ != rhs.patch_type_) ? lhs.patch_type_ < rhs.patch_type_
      : (lhs.target_dex_file_ != rhs.target_dex_file_) ? lhs.target_dex_file_ < rhs.target_dex_file_
      : (lhs.cmp1_ != rhs.cmp1_) ? lhs.cmp1_ < rhs.cmp1_
      : lhs.cmp2_ < rhs.cmp2_;
}

}  // namespace linker
}  // namespace art

#endif  // ART_COMPILER_LINKER_LINKER_PATCH_H_