/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "lambda/closure.h"

#include "base/logging.h"
#include "lambda/art_lambda_method.h"
#include "runtime/mirror/object_reference.h"

namespace art {
namespace lambda {

template <typename T>
// TODO: can I return T __attribute__((__aligned__(1)))* here instead?
const uint8_t* Closure::GetUnsafeAtOffset(size_t offset) const {
  // Do not DCHECK here with existing helpers since most of them will call into this function.
  return reinterpret_cast<const uint8_t*>(captured_) + offset;
}

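// Returns the runtime size in bytes of the captured variable of 'variable_type' stored at
// 'offset': nested lambdas are variable-sized and must be measured; every other shorty type
// has a fixed static size.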
size_t Closure::GetCapturedVariableSize(ShortyFieldType variable_type, size_t offset) const {
  switch (variable_type) {
    case ShortyFieldType::kLambda:
    {
      return GetClosureSize(GetUnsafeAtOffset<Closure>(offset));
    }
    default:
      DCHECK(variable_type.IsStaticSize());
      return variable_type.GetStaticSize();
  }
}

// Templatize the flags to give the compiler a fighting chance to eliminate
// any unnecessary code through different uses of this function.
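// Walks the captured-variables type descriptor one shorty character at a time, accumulating
// (depending on 'flags') the variable type, index, count, and byte offset up to and including
// 'upto_index'; passing VariableInfo::kUpToIndexMax scans every captured variable.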
template <Closure::VariableInfo::Flags flags>
inline Closure::VariableInfo Closure::ParseTypeDescriptor(const char* type_descriptor,
                                                          size_t upto_index) const {
  DCHECK(type_descriptor != nullptr);

  VariableInfo result;

  ShortyFieldType last_type;
  size_t offset = (flags & VariableInfo::kOffset) ? GetStartingOffset() : 0;
  size_t prev_offset = 0;
  size_t count = 0;

  while ((type_descriptor =
      ShortyFieldType::ParseFromFieldTypeDescriptor(type_descriptor, &last_type)) != nullptr) {
    count++;

    if (flags & VariableInfo::kOffset) {
      // Accumulate the sizes of all preceding captured variables to form the current offset.
      offset += prev_offset;
      prev_offset = GetCapturedVariableSize(last_type, offset);
    }

    if (count > upto_index) {
      break;
    }
  }

  if (flags & VariableInfo::kVariableType) {
    result.variable_type_ = last_type;
  }

  if (flags & VariableInfo::kIndex) {
    result.index_ = count;
  }

  if (flags & VariableInfo::kCount) {
    result.count_ = count;
  }

  if (flags & VariableInfo::kOffset) {
    result.offset_ = offset;
  }

  // TODO: We should probably store the result of this in the ArtLambdaMethod,
  // to avoid re-computing the data every single time for static closures.
  return result;
}

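// The captured variables are stored at the tail of the closure, so their combined size is
// everything past the fixed-size header.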
size_t Closure::GetCapturedVariablesSize() const {
  const size_t captured_variable_offset = offsetof(Closure, captured_);
  DCHECK_GE(GetSize(), captured_variable_offset);  // Prevent underflows.
  return GetSize() - captured_variable_offset;
}

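// A statically-sized closure gets its size straight from the ArtLambdaMethod metadata;
// a dynamically-sized closure stores its actual size inline as the first word of captured_.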
size_t Closure::GetSize() const {
  const size_t static_closure_size = lambda_info_->GetStaticClosureSize();
  if (LIKELY(lambda_info_->IsStaticSize())) {
    return static_closure_size;
  }

  DCHECK_GE(static_closure_size, sizeof(captured_[0].dynamic_.size_));
  const size_t dynamic_closure_size = captured_[0].dynamic_.size_;
  // The dynamic size better be at least as big as the static size.
  DCHECK_GE(dynamic_closure_size, static_closure_size);

  return dynamic_closure_size;
}

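// Raw byte copy of the whole closure (header plus captured variables) into 'target'.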
void Closure::CopyTo(void* target, size_t target_size) const {
  DCHECK_GE(target_size, GetSize());

  // TODO: using memcpy is unsafe with read barriers, fix this once we add reference support
  static_assert(kClosureSupportsReferences == false,
                "Do not use memcpy with readbarrier references");
  memcpy(target, this, GetSize());
}

ArtMethod* Closure::GetTargetMethod() const {
  return const_cast<ArtMethod*>(lambda_info_->GetArtMethod());
}

ArtLambdaMethod* Closure::GetLambdaInfo() const {
  return const_cast<ArtLambdaMethod*>(lambda_info_);
}

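// Java-style polynomial hash (result = 31 * result + term) over the target ArtMethod pointer
// followed by the raw bytes of every captured variable.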
uint32_t Closure::GetHashCode() const {
  // Start with a non-zero constant, a prime number.
  uint32_t result = 17;

  // Mix the target ArtMethod's pointer into the hash.
  {
    uintptr_t method = reinterpret_cast<uintptr_t>(GetTargetMethod());
    result = 31 * result + Low32Bits(method);
    if (sizeof(method) == sizeof(uint64_t)) {
      result = 31 * result + High32Bits(method);
    }
  }

  // Include a hash for each captured variable.
  for (size_t i = 0; i < GetCapturedVariablesSize(); ++i) {
    // TODO: not safe for GC-able values since the address can move and the hash code would change.
    uint8_t captured_variable_raw_value;
    CopyUnsafeAtOffset<uint8_t>(i, /*out*/&captured_variable_raw_value);  // NOLINT: [whitespace/comma] [3]

    result = 31 * result + captured_variable_raw_value;
  }

  // TODO: Fix above loop to work for objects and lambdas.
  static_assert(kClosureSupportsGarbageCollection == false,
                "Need to update above loop to read the hash code from the "
                "objects and lambdas recursively");

  return result;
}

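// Structural comparison of the raw closure bytes: primitives compare by value and object
// references compare by address (hence 'reference' equality for captured objects).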
bool Closure::ReferenceEquals(const Closure* other) const {
  DCHECK(other != nullptr);

  // TODO: Need rework to use read barriers once closures have references inside of them that can
  // move. Until then, it's safe to just compare the data inside of it directly.
  static_assert(kClosureSupportsReferences == false,
                "Unsafe to use memcmp in read barrier collector");

  if (GetSize() != other->GetSize()) {
    return false;
  }

  // memcmp returns 0 when the two memory regions are equal.
  return memcmp(this, other, GetSize()) == 0;
}

size_t Closure::GetNumberOfCapturedVariables() const {
  // TODO: refactor into art_lambda_method.h. Parsing should only be required here as a DCHECK.
  VariableInfo variable_info =
      ParseTypeDescriptor<VariableInfo::kCount>(GetCapturedVariablesTypeDescriptor(),
                                                VariableInfo::kUpToIndexMax);
  size_t count = variable_info.count_;
  // Since each variable occupies at least 1 byte, the size must be at least the count.
  DCHECK_LE(count, GetCapturedVariablesSize());
  return count;
}

const char* Closure::GetCapturedVariablesTypeDescriptor() const {
  return lambda_info_->GetCapturedVariablesTypeDescriptor();
}

ShortyFieldType Closure::GetCapturedShortyType(size_t index) const {
  DCHECK_LT(index, GetNumberOfCapturedVariables());

  VariableInfo variable_info =
      ParseTypeDescriptor<VariableInfo::kVariableType>(GetCapturedVariablesTypeDescriptor(),
                                                       index);

  return variable_info.variable_type_;
}

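// Reads a narrow (at most 32-bit) captured primitive into a uint32_t; on little-endian
// targets this amounts to zero-extending the stored value.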
uint32_t Closure::GetCapturedPrimitiveNarrow(size_t index) const {
  DCHECK(GetCapturedShortyType(index).IsPrimitiveNarrow());

  ShortyFieldType variable_type;
  size_t offset;
  GetCapturedVariableTypeAndOffset(index, &variable_type, &offset);

  // TODO: Restructure to use template specialization, e.g. GetCapturedPrimitive<T>
  // so that we can avoid this nonsense regarding memcpy always overflowing.
  // Plus, this additional switching seems redundant since the interpreter
  // would've done it already, and knows the exact type.
  uint32_t result = 0;
  static_assert(ShortyFieldTypeTraits::IsPrimitiveNarrowType<decltype(result)>(),
                "result must be a primitive narrow type");
  switch (variable_type) {
    case ShortyFieldType::kBoolean:
      CopyUnsafeAtOffset<bool>(offset, &result);
      break;
    case ShortyFieldType::kByte:
      CopyUnsafeAtOffset<uint8_t>(offset, &result);
      break;
    case ShortyFieldType::kChar:
      CopyUnsafeAtOffset<uint16_t>(offset, &result);
      break;
    case ShortyFieldType::kShort:
      CopyUnsafeAtOffset<int16_t>(offset, &result);
      break;
    case ShortyFieldType::kInt:
      CopyUnsafeAtOffset<int32_t>(offset, &result);
      break;
    case ShortyFieldType::kFloat:
      // XX: Maybe there should just be a GetCapturedPrimitive<T> to avoid this shuffle?
      // The interpreter's invoke seems to only special case references and wides,
      // everything else is treated as a generic 32-bit pattern.
      CopyUnsafeAtOffset<float>(offset, &result);
      break;
    default:
      LOG(FATAL)
          << "expected a valid narrow primitive shorty type but got "
          << static_cast<char>(variable_type);
      UNREACHABLE();
  }

  return result;
}

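// Reads a wide (64-bit) captured primitive (long or double) as a raw 64-bit bit pattern.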
uint64_t Closure::GetCapturedPrimitiveWide(size_t index) const {
  DCHECK(GetCapturedShortyType(index).IsPrimitiveWide());

  ShortyFieldType variable_type;
  size_t offset;
  GetCapturedVariableTypeAndOffset(index, &variable_type, &offset);

  // TODO: Restructure to use template specialization, e.g. GetCapturedPrimitive<T>
  // so that we can avoid this nonsense regarding memcpy always overflowing.
  // Plus, this additional switching seems redundant since the interpreter
  // would've done it already, and knows the exact type.
  uint64_t result = 0;
  static_assert(ShortyFieldTypeTraits::IsPrimitiveWideType<decltype(result)>(),
                "result must be a primitive wide type");
  switch (variable_type) {
    case ShortyFieldType::kLong:
      CopyUnsafeAtOffset<int64_t>(offset, &result);
      break;
    case ShortyFieldType::kDouble:
      CopyUnsafeAtOffset<double>(offset, &result);
      break;
    default:
      LOG(FATAL)
          << "expected a valid primitive wide shorty type but got "
          << static_cast<char>(variable_type);
      UNREACHABLE();
  }

  return result;
}

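// Reads a captured object reference, decompressing the stored 32-bit compressed reference
// back into a raw mirror::Object pointer.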
mirror::Object* Closure::GetCapturedObject(size_t index) const {
  DCHECK(GetCapturedShortyType(index).IsObject());

  ShortyFieldType variable_type;
  size_t offset;
  GetCapturedVariableTypeAndOffset(index, &variable_type, &offset);

  // TODO: Restructure to use template specialization, e.g. GetCapturedPrimitive<T>
  // so that we can avoid this nonsense regarding memcpy always overflowing.
  // Plus, this additional switching seems redundant since the interpreter
  // would've done it already, and knows the exact type.
  mirror::Object* result = nullptr;
  static_assert(ShortyFieldTypeTraits::IsObjectType<decltype(result)>(),
                "result must be an object type");
  switch (variable_type) {
    case ShortyFieldType::kObject:
      // TODO: This seems unsafe. This may need to use gcroots.
      static_assert(kClosureSupportsGarbageCollection == false,
                    "May need GcRoots and definitely need mutator locks");
      {
        mirror::CompressedReference<mirror::Object> compressed_result;
        CopyUnsafeAtOffset<uint32_t>(offset, &compressed_result);
        result = compressed_result.AsMirrorPtr();
      }
      break;
    default:
      CHECK(false)
          << "expected a valid shorty type but got " << static_cast<char>(variable_type);
      UNREACHABLE();
  }

  return result;
}

size_t Closure::GetCapturedClosureSize(size_t index) const {
  DCHECK(GetCapturedShortyType(index).IsLambda());
  size_t offset = GetCapturedVariableOffset(index);

  auto* captured_ptr = reinterpret_cast<const uint8_t*>(&captured_);
  size_t closure_size = GetClosureSize(captured_ptr + offset);

  return closure_size;
}

void Closure::CopyCapturedClosure(size_t index, void* destination, size_t destination_room) const {
  DCHECK(GetCapturedShortyType(index).IsLambda());
  size_t offset = GetCapturedVariableOffset(index);

  auto* captured_ptr = reinterpret_cast<const uint8_t*>(&captured_);
  size_t closure_size = GetClosureSize(captured_ptr + offset);

  static_assert(ShortyFieldTypeTraits::IsLambdaType<Closure*>(),
                "result must be a lambda type");

  CopyUnsafeAtOffset<Closure>(offset, destination, closure_size, destination_room);
}

size_t Closure::GetCapturedVariableOffset(size_t index) const {
  VariableInfo variable_info =
      ParseTypeDescriptor<VariableInfo::kOffset>(GetCapturedVariablesTypeDescriptor(),
                                                 index);

  size_t offset = variable_info.offset_;

  return offset;
}

void Closure::GetCapturedVariableTypeAndOffset(size_t index,
                                               ShortyFieldType* out_type,
                                               size_t* out_offset) const {
  DCHECK(out_type != nullptr);
  DCHECK(out_offset != nullptr);

  static constexpr const VariableInfo::Flags kVariableTypeAndOffset =
      static_cast<VariableInfo::Flags>(VariableInfo::kVariableType | VariableInfo::kOffset);
  VariableInfo variable_info =
      ParseTypeDescriptor<kVariableTypeAndOffset>(GetCapturedVariablesTypeDescriptor(),
                                                  index);

  ShortyFieldType variable_type = variable_info.variable_type_;
  size_t offset = variable_info.offset_;

  *out_type = variable_type;
  *out_offset = offset;
}

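// Copies 'src_size' bytes of captured data out of the closure; the source may be unaligned,
// which is why this goes through memcpy rather than a typed load.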
template <typename T>
void Closure::CopyUnsafeAtOffset(size_t offset,
                                 void* destination,
                                 size_t src_size,
                                 size_t destination_room) const {
  DCHECK_GE(destination_room, src_size);
  const uint8_t* data_ptr = GetUnsafeAtOffset<T>(offset);
  // Copy src_size bytes, not sizeof(T): a captured lambda's size is dynamic and can exceed
  // sizeof(Closure).
  memcpy(destination, data_ptr, src_size);
}

// TODO: This is kind of ugly. I would prefer an unaligned_ptr<Closure> here.
// Unfortunately C++ doesn't let you lower the alignment, i.e. 'alignas(1) Closure*' is not legal.
size_t Closure::GetClosureSize(const uint8_t* closure) {
  DCHECK(closure != nullptr);

  static_assert(!std::is_base_of<mirror::Object, Closure>::value,
                "It might be unsafe to call memcpy on a managed object");

  // Safe as long as it's not a mirror Object.
  // TODO: Should probably wrap this in like MemCpyNative or some such which statically asserts
  // we aren't trying to copy mirror::Object data around.
  ArtLambdaMethod* closure_info;
  memcpy(&closure_info, closure + offsetof(Closure, lambda_info_), sizeof(closure_info));

  if (LIKELY(closure_info->IsStaticSize())) {
    return closure_info->GetStaticClosureSize();
  }

  // The size is dynamic, so we need to read it from the captured_variables_ portion.
  size_t dynamic_size;
  memcpy(&dynamic_size,
         closure + offsetof(Closure, captured_[0].dynamic_.size_),
         sizeof(dynamic_size));
  static_assert(sizeof(dynamic_size) == sizeof(captured_[0].dynamic_.size_),
                "Dynamic size type must match the structural type of the size");

  DCHECK_GE(dynamic_size, closure_info->GetStaticClosureSize());
  return dynamic_size;
}

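// Byte offset of the first captured variable relative to the start of captured_.
// Dynamically-sized closures store their total size first, so their variables begin after it.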
size_t Closure::GetStartingOffset() const {
  static constexpr const size_t captured_offset = offsetof(Closure, captured_);
  if (LIKELY(lambda_info_->IsStaticSize())) {
    return offsetof(Closure, captured_[0].static_variables_) - captured_offset;
  } else {
    return offsetof(Closure, captured_[0].dynamic_.variables_) - captured_offset;
  }
}

}  // namespace lambda
}  // namespace art