/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17#include "lambda/closure.h"
18
19#include "base/logging.h"
20#include "lambda/art_lambda_method.h"
21#include "runtime/mirror/object_reference.h"
22
// Whether closures may capture object references. Several functions below
// static_assert on this before doing raw memcpy on closure memory, which
// would be unsafe once read barriers / references are supported.
static constexpr bool kClosureSupportsReferences = false;
// Whether closure memory participates in garbage collection (would require
// GcRoots and mutator locks — see GetCapturedObject).
static constexpr bool kClosureSupportsGarbageCollection = false;
25
26namespace art {
27namespace lambda {
28
29template <typename T>
30// TODO: can I return T __attribute__((__aligned__(1)))* here instead?
31const uint8_t* Closure::GetUnsafeAtOffset(size_t offset) const {
32 // Do not DCHECK here with existing helpers since most of them will call into this function.
33 return reinterpret_cast<const uint8_t*>(captured_) + offset;
34}
35
36size_t Closure::GetCapturedVariableSize(ShortyFieldType variable_type, size_t offset) const {
37 switch (variable_type) {
38 case ShortyFieldType::kLambda:
39 {
40 return GetClosureSize(GetUnsafeAtOffset<Closure>(offset));
41 }
42 default:
43 DCHECK(variable_type.IsStaticSize());
44 return variable_type.GetStaticSize();
45 }
46}
47
// Templatize the flags to give the compiler a fighting chance to eliminate
// any unnecessary code through different uses of this function.
//
// Walks the captured-variables type descriptor entry by entry, stopping after
// the entry at 'upto_index' has been consumed (or at the end of the
// descriptor, in which case 'count' holds the total number of entries).
// Only the fields selected by the 'flags' template parameter are filled in.
template <Closure::VariableInfo::Flags flags>
inline Closure::VariableInfo Closure::ParseTypeDescriptor(const char* type_descriptor,
                                                          size_t upto_index) const {
  DCHECK(type_descriptor != nullptr);

  VariableInfo result;

  ShortyFieldType last_type;
  // Only pay for offset bookkeeping when the caller requested offsets.
  size_t offset = (flags & VariableInfo::kOffset) ? GetStartingOffset() : 0;
  size_t prev_offset = 0;
  size_t count = 0;

  while ((type_descriptor =
      ShortyFieldType::ParseFromFieldTypeDescriptor(type_descriptor, &last_type)) != nullptr) {
    count++;

    if (flags & VariableInfo::kOffset) {
      // Accumulate the sizes of all preceding captured variables as the current offset only.
      // The size computation lags one iteration behind, so 'offset' always
      // points at the start of the variable just parsed into 'last_type'.
      offset += prev_offset;
      prev_offset = GetCapturedVariableSize(last_type, offset);
    }

    if ((count > upto_index)) {
      break;
    }
  }

  // Populate only the requested fields; the rest stay default-initialized.
  if (flags & VariableInfo::kVariableType) {
    result.variable_type_ = last_type;
  }

  if (flags & VariableInfo::kIndex) {
    result.index_ = count;
  }

  if (flags & VariableInfo::kCount) {
    result.count_ = count;
  }

  if (flags & VariableInfo::kOffset) {
    result.offset_ = offset;
  }

  // TODO: We should probably store the result of this in the ArtLambdaMethod,
  // to avoid re-computing the data every single time for static closures.
  return result;
}
97
98size_t Closure::GetCapturedVariablesSize() const {
99 const size_t captured_variable_offset = offsetof(Closure, captured_);
100 DCHECK_GE(GetSize(), captured_variable_offset); // Prevent underflows.
101 return GetSize() - captured_variable_offset;
102}
103
104size_t Closure::GetSize() const {
105 const size_t static_closure_size = lambda_info_->GetStaticClosureSize();
106 if (LIKELY(lambda_info_->IsStaticSize())) {
107 return static_closure_size;
108 }
109
110 DCHECK_GE(static_closure_size, sizeof(captured_[0].dynamic_.size_));
111 const size_t dynamic_closure_size = captured_[0].dynamic_.size_;
112 // The dynamic size better be at least as big as the static size.
113 DCHECK_GE(dynamic_closure_size, static_closure_size);
114
115 return dynamic_closure_size;
116}
117
118void Closure::CopyTo(void* target, size_t target_size) const {
119 DCHECK_GE(target_size, GetSize());
120
121 // TODO: using memcpy is unsafe with read barriers, fix this once we add reference support
122 static_assert(kClosureSupportsReferences == false,
123 "Do not use memcpy with readbarrier references");
124 memcpy(target, this, GetSize());
125}
126
127size_t Closure::GetNumberOfCapturedVariables() const {
128 // TODO: refactor into art_lambda_method.h. Parsing should only be required here as a DCHECK.
129 VariableInfo variable_info =
130 ParseTypeDescriptor<VariableInfo::kCount>(GetCapturedVariablesTypeDescriptor(),
131 VariableInfo::kUpToIndexMax);
132 size_t count = variable_info.count_;
133 // Assuming each variable was 1 byte, the size should always be greater or equal than the count.
134 DCHECK_LE(count, GetCapturedVariablesSize());
135 return count;
136}
137
138const char* Closure::GetCapturedVariablesTypeDescriptor() const {
139 return lambda_info_->GetCapturedVariablesTypeDescriptor();
140}
141
142ShortyFieldType Closure::GetCapturedShortyType(size_t index) const {
143 DCHECK_LT(index, GetNumberOfCapturedVariables());
144
145 VariableInfo variable_info =
146 ParseTypeDescriptor<VariableInfo::kVariableType>(GetCapturedVariablesTypeDescriptor(),
147 index);
148
149 return variable_info.variable_type_;
150}
151
// Reads the narrow (<= 32-bit) primitive captured at 'index', widened into a
// uint32_t bit pattern. The caller must know the index holds a narrow primitive.
uint32_t Closure::GetCapturedPrimitiveNarrow(size_t index) const {
  DCHECK(GetCapturedShortyType(index).IsPrimitiveNarrow());

  ShortyFieldType variable_type;
  size_t offset;
  GetCapturedVariableTypeAndOffset(index, &variable_type, &offset);

  // TODO: Restructure to use template specialization, e.g. GetCapturedPrimitive<T>
  // so that we can avoid this nonsense regarding memcpy always overflowing.
  // Plus, this additional switching seems redundant since the interpreter
  // would've done it already, and knows the exact type.
  uint32_t result = 0;
  static_assert(ShortyFieldTypeTraits::IsPrimitiveNarrowType<decltype(result)>(),
                "result must be a primitive narrow type");
  // Each case copies sizeof(source type) bytes into the zero-initialized
  // result, so types narrower than 32 bits fill only the low bytes.
  switch (variable_type) {
    case ShortyFieldType::kBoolean:
      CopyUnsafeAtOffset<bool>(offset, &result);
      break;
    case ShortyFieldType::kByte:
      CopyUnsafeAtOffset<uint8_t>(offset, &result);
      break;
    case ShortyFieldType::kChar:
      CopyUnsafeAtOffset<uint16_t>(offset, &result);
      break;
    case ShortyFieldType::kShort:
      CopyUnsafeAtOffset<int16_t>(offset, &result);
      break;
    case ShortyFieldType::kInt:
      CopyUnsafeAtOffset<int32_t>(offset, &result);
      break;
    case ShortyFieldType::kFloat:
      // XX: Maybe there should just be a GetCapturedPrimitive<T> to avoid this shuffle?
      // The interpreter's invoke seems to only special case references and wides,
      // everything else is treated as a generic 32-bit pattern.
      CopyUnsafeAtOffset<float>(offset, &result);
      break;
    default:
      LOG(FATAL)
          << "expected a valid narrow primitive shorty type but got "
          << static_cast<char>(variable_type);
      UNREACHABLE();
  }

  return result;
}
197
// Reads the wide (64-bit) primitive captured at 'index' as a uint64_t bit
// pattern. The caller must know the index holds a wide primitive.
uint64_t Closure::GetCapturedPrimitiveWide(size_t index) const {
  DCHECK(GetCapturedShortyType(index).IsPrimitiveWide());

  ShortyFieldType variable_type;
  size_t offset;
  GetCapturedVariableTypeAndOffset(index, &variable_type, &offset);

  // TODO: Restructure to use template specialization, e.g. GetCapturedPrimitive<T>
  // so that we can avoid this nonsense regarding memcpy always overflowing.
  // Plus, this additional switching seems redundant since the interpreter
  // would've done it already, and knows the exact type.
  uint64_t result = 0;
  static_assert(ShortyFieldTypeTraits::IsPrimitiveWideType<decltype(result)>(),
                "result must be a primitive wide type");
  switch (variable_type) {
    case ShortyFieldType::kLong:
      CopyUnsafeAtOffset<int64_t>(offset, &result);
      break;
    case ShortyFieldType::kDouble:
      // Double bits are transferred verbatim; the caller reinterprets them.
      CopyUnsafeAtOffset<double>(offset, &result);
      break;
    default:
      LOG(FATAL)
          << "expected a valid primitive wide shorty type but got "
          << static_cast<char>(variable_type);
      UNREACHABLE();
  }

  return result;
}
228
229mirror::Object* Closure::GetCapturedObject(size_t index) const {
230 DCHECK(GetCapturedShortyType(index).IsObject());
231
232 ShortyFieldType variable_type;
233 size_t offset;
234 GetCapturedVariableTypeAndOffset(index, &variable_type, &offset);
235
236 // TODO: Restructure to use template specialization, e.g. GetCapturedPrimitive<T>
237 // so that we can avoid this nonsense regarding memcpy always overflowing.
238 // Plus, this additional switching seems redundant since the interpreter
239 // would've done it already, and knows the exact type.
240 mirror::Object* result = nullptr;
241 static_assert(ShortyFieldTypeTraits::IsObjectType<decltype(result)>(),
242 "result must be an object type");
243 switch (variable_type) {
244 case ShortyFieldType::kObject:
245 // TODO: This seems unsafe. This may need to use gcroots.
246 static_assert(kClosureSupportsGarbageCollection == false,
247 "May need GcRoots and definitely need mutator locks");
248 {
249 mirror::CompressedReference<mirror::Object> compressed_result;
250 CopyUnsafeAtOffset<uint32_t>(offset, &compressed_result);
251 result = compressed_result.AsMirrorPtr();
252 }
253 break;
254 default:
255 CHECK(false)
256 << "expected a valid shorty type but got " << static_cast<char>(variable_type);
257 UNREACHABLE();
258 }
259
260 return result;
261}
262
263size_t Closure::GetCapturedClosureSize(size_t index) const {
264 DCHECK(GetCapturedShortyType(index).IsLambda());
265 size_t offset = GetCapturedVariableOffset(index);
266
267 auto* captured_ptr = reinterpret_cast<const uint8_t*>(&captured_);
268 size_t closure_size = GetClosureSize(captured_ptr + offset);
269
270 return closure_size;
271}
272
// Copies the nested closure captured at 'index' into 'destination', which has
// room for 'destination_room' bytes.
void Closure::CopyCapturedClosure(size_t index, void* destination, size_t destination_room) const {
  DCHECK(GetCapturedShortyType(index).IsLambda());
  size_t offset = GetCapturedVariableOffset(index);

  // Nested closures are variable-sized, so read the actual size from the payload.
  auto* captured_ptr = reinterpret_cast<const uint8_t*>(&captured_);
  size_t closure_size = GetClosureSize(captured_ptr + offset);

  static_assert(ShortyFieldTypeTraits::IsLambdaType<Closure*>(),
                "result must be a lambda type");

  // NOTE(review): CopyUnsafeAtOffset<T> copies only sizeof(T) bytes even
  // though closure_size is passed as the source size; a dynamically-sized
  // nested closure (closure_size > sizeof(Closure)) would be truncated here —
  // verify intent.
  CopyUnsafeAtOffset<Closure>(offset, destination, closure_size, destination_room);
}
285
286size_t Closure::GetCapturedVariableOffset(size_t index) const {
287 VariableInfo variable_info =
288 ParseTypeDescriptor<VariableInfo::kOffset>(GetCapturedVariablesTypeDescriptor(),
289 index);
290
291 size_t offset = variable_info.offset_;
292
293 return offset;
294}
295
296void Closure::GetCapturedVariableTypeAndOffset(size_t index,
297 ShortyFieldType* out_type,
298 size_t* out_offset) const {
299 DCHECK(out_type != nullptr);
300 DCHECK(out_offset != nullptr);
301
302 static constexpr const VariableInfo::Flags kVariableTypeAndOffset =
303 static_cast<VariableInfo::Flags>(VariableInfo::kVariableType | VariableInfo::kOffset);
304 VariableInfo variable_info =
305 ParseTypeDescriptor<kVariableTypeAndOffset>(GetCapturedVariablesTypeDescriptor(),
306 index);
307
308 ShortyFieldType variable_type = variable_info.variable_type_;
309 size_t offset = variable_info.offset_;
310
311 *out_type = variable_type;
312 *out_offset = offset;
313}
314
315template <typename T>
316void Closure::CopyUnsafeAtOffset(size_t offset,
317 void* destination,
318 size_t src_size,
319 size_t destination_room) const {
320 DCHECK_GE(destination_room, src_size);
321 const uint8_t* data_ptr = GetUnsafeAtOffset<T>(offset);
322 memcpy(destination, data_ptr, sizeof(T));
323}
324
// TODO: This is kind of ugly. I would prefer an unaligned_ptr<Closure> here.
// Unfortunately C++ doesn't let you lower the alignment (i.e. alignas(1) Closure*) is not legal.
//
// Static helper: computes the size of the (possibly unaligned) closure at
// 'closure' without forming a Closure* — all field reads go through memcpy.
size_t Closure::GetClosureSize(const uint8_t* closure) {
  DCHECK(closure != nullptr);

  static_assert(!std::is_base_of<mirror::Object, Closure>::value,
                "It might be unsafe to call memcpy on a managed object");

  // Safe as long as it's not a mirror Object.
  // TODO: Should probably wrap this in like MemCpyNative or some such which statically asserts
  // we aren't trying to copy mirror::Object data around.
  // Extract the lambda_info_ pointer field via memcpy (alignment-safe).
  ArtLambdaMethod* closure_info;
  memcpy(&closure_info, closure + offsetof(Closure, lambda_info_), sizeof(closure_info));

  if (LIKELY(closure_info->IsStaticSize())) {
    return closure_info->GetStaticClosureSize();
  }

  // The size is dynamic, so we need to read it from captured_variables_ portion.
  size_t dynamic_size;
  memcpy(&dynamic_size,
         closure + offsetof(Closure, captured_[0].dynamic_.size_),
         sizeof(dynamic_size));
  static_assert(sizeof(dynamic_size) == sizeof(captured_[0].dynamic_.size_),
                "Dynamic size type must match the structural type of the size");

  DCHECK_GE(dynamic_size, closure_info->GetStaticClosureSize());
  return dynamic_size;
}
354
355size_t Closure::GetStartingOffset() const {
356 static constexpr const size_t captured_offset = offsetof(Closure, captured_);
357 if (LIKELY(lambda_info_->IsStaticSize())) {
358 return offsetof(Closure, captured_[0].static_variables_) - captured_offset;
359 } else {
360 return offsetof(Closure, captured_[0].dynamic_.variables_) - captured_offset;
361 }
362}
363
364} // namespace lambda
365} // namespace art