/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "common_runtime_test.h"
#include "mirror/art_field-inl.h"
#include "mirror/string-inl.h"

#include <cstdio>

namespace art {

class StubTest : public CommonRuntimeTest {
 protected:
  // We need callee-save methods set up in the Runtime for exceptions.
  void SetUp() OVERRIDE {
    // Do the normal setup.
    CommonRuntimeTest::SetUp();

    {
      // Create callee-save methods.
      ScopedObjectAccess soa(Thread::Current());
      runtime_->SetInstructionSet(kRuntimeISA);
      for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
        Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
        if (!runtime_->HasCalleeSaveMethod(type)) {
          runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(type), type);
        }
      }
    }
  }

  void SetUpRuntimeOptions(Runtime::Options* options) OVERRIDE {
    // Use a smaller heap to make it easier to provoke out-of-memory in the allocation tests.
    for (std::pair<std::string, const void*>& pair : *options) {
      if (pair.first.find("-Xmx") == 0) {
        pair.first = "-Xmx4M";  // Smallest we can go.
      }
    }
  }
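
  // Invoke a quick stub with up to three word-sized arguments. The per-architecture inline
  // assembly below places the arguments where the stub expects them, pushes a null "method" to
  // terminate the quick stack for stack walks, calls the stub, and reads the result back from
  // the architecture's return register.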
  size_t Invoke3(size_t arg0, size_t arg1, size_t arg2, uintptr_t code, Thread* self) {
    // Push a transition back into managed code onto the linked list in thread.
    ManagedStack fragment;
    self->PushManagedStackFragment(&fragment);

    size_t result;
#if defined(__i386__)
    // TODO: Set the thread?
    __asm__ __volatile__(
        "pushl $0\n\t"               // Push nullptr to terminate quick stack
        "call *%%edi\n\t"            // Call the stub
        "addl $4, %%esp"             // Pop nullptr
        : "=a" (result)
          // Use the result from eax
        : "a"(arg0), "c"(arg1), "d"(arg2), "D"(code)
          // This places code into edi, arg0 into eax, arg1 into ecx, and arg2 into edx
        : );  // clobber.
    // TODO: Should we clobber the other registers? EBX gets clobbered by some of the stubs,
    //       but compilation fails when declaring that.
#elif defined(__arm__)
    __asm__ __volatile__(
        "push {r1-r12, lr}\n\t"      // Save state, 13*4B = 52B
        ".cfi_adjust_cfa_offset 52\n\t"
        "push {r9}\n\t"
        ".cfi_adjust_cfa_offset 4\n\t"
        "mov r9, #0\n\t"
        "str r9, [sp, #-8]!\n\t"     // Push nullptr to terminate stack, +8B padding so 16B aligned
        ".cfi_adjust_cfa_offset 8\n\t"
        "ldr r9, [sp, #8]\n\t"

        // Push everything on the stack, so we don't rely on the order. What a mess. :-(
        "sub sp, sp, #20\n\t"
        "str %[arg0], [sp]\n\t"
        "str %[arg1], [sp, #4]\n\t"
        "str %[arg2], [sp, #8]\n\t"
        "str %[code], [sp, #12]\n\t"
        "str %[self], [sp, #16]\n\t"
        "ldr r0, [sp]\n\t"
        "ldr r1, [sp, #4]\n\t"
        "ldr r2, [sp, #8]\n\t"
        "ldr r3, [sp, #12]\n\t"
        "ldr r9, [sp, #16]\n\t"
        "add sp, sp, #20\n\t"

        "blx r3\n\t"                 // Call the stub
        "add sp, sp, #12\n\t"        // Pop nullptr and padding
        ".cfi_adjust_cfa_offset -12\n\t"
        "pop {r1-r12, lr}\n\t"       // Restore state
        ".cfi_adjust_cfa_offset -52\n\t"
        "mov %[result], r0\n\t"      // Save the result
        : [result] "=r" (result)
          // Use the result from r0
        : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self)
        : );  // clobber.
#elif defined(__aarch64__)
    __asm__ __volatile__(
        "sub sp, sp, #48\n\t"          // Reserve stack space, 16B aligned
        ".cfi_adjust_cfa_offset 48\n\t"
        "stp xzr, x1, [sp]\n\t"        // nullptr (end of quick stack), x1
        "stp x2, x3, [sp, #16]\n\t"    // Save x2, x3
        "stp x18, x30, [sp, #32]\n\t"  // Save x18(xSELF), xLR

        // Push everything on the stack, so we don't rely on the order. What a mess. :-(
        "sub sp, sp, #48\n\t"
        "str %[arg0], [sp]\n\t"
        "str %[arg1], [sp, #8]\n\t"
        "str %[arg2], [sp, #16]\n\t"
        "str %[code], [sp, #24]\n\t"
        "str %[self], [sp, #32]\n\t"
        "ldr x0, [sp]\n\t"
        "ldr x1, [sp, #8]\n\t"
        "ldr x2, [sp, #16]\n\t"
        "ldr x3, [sp, #24]\n\t"
        "ldr x18, [sp, #32]\n\t"
        "add sp, sp, #48\n\t"

        "blr x3\n\t"                   // Call the stub
        "ldp x1, x2, [sp, #8]\n\t"     // Restore x1, x2
        "ldp x3, x18, [sp, #24]\n\t"   // Restore x3, xSELF
        "ldr x30, [sp, #40]\n\t"       // Restore xLR
        "add sp, sp, #48\n\t"          // Free stack space
        ".cfi_adjust_cfa_offset -48\n\t"

        "mov %[result], x0\n\t"        // Save the result
        : [result] "=r" (result)
          // Use the result from x0
        : [arg0] "0"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self)
        : "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16",
          "x17");  // clobber.
#elif defined(__x86_64__)
    // Note: Uses the native convention
    // TODO: Set the thread?
    __asm__ __volatile__(
        "pushq $0\n\t"               // Push nullptr to terminate quick stack
        "pushq $0\n\t"               // 16B alignment padding
        ".cfi_adjust_cfa_offset 16\n\t"
        "call *%%rax\n\t"            // Call the stub
        "addq $16, %%rsp\n\t"        // Pop nullptr and padding
        ".cfi_adjust_cfa_offset -16\n\t"
        : "=a" (result)
          // Use the result from rax
        : "D"(arg0), "S"(arg1), "d"(arg2), "a"(code)
          // This places arg0 into rdi, arg1 into rsi, arg2 into rdx, and code into rax
        : "rbx", "rcx", "rbp", "r8", "r9", "r10", "r11", "r12", "r13", "r14",
          "r15");  // clobber all
    // TODO: Should we clobber the other registers?
#else
    LOG(WARNING) << "Was asked to invoke for an architecture I do not understand.";
    result = 0;
#endif
    // Pop transition.
    self->PopManagedStackFragment(fragment);
    return result;
  }

 public:
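  // Like Invoke3, but additionally hands the stub a "referrer" ArtMethod. Entrypoints that
  // resolve dex indices (such as the field get/set stubs exercised below) need a calling method
  // to resolve against; how the referrer is passed (here, a dedicated stack slot) is
  // architecture-specific, as sketched in the assembly.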
  // TODO: Set up a frame according to referrer's specs.
  size_t Invoke3WithReferrer(size_t arg0, size_t arg1, size_t arg2, uintptr_t code, Thread* self,
                             mirror::ArtMethod* referrer) {
    // Push a transition back into managed code onto the linked list in thread.
    ManagedStack fragment;
    self->PushManagedStackFragment(&fragment);

    size_t result;
#if defined(__i386__)
    // TODO: Set the thread?
    __asm__ __volatile__(
        "pushl %[referrer]\n\t"      // Store referrer
        "call *%%edi\n\t"            // Call the stub
        "addl $4, %%esp"             // Pop referrer
        : "=a" (result)
          // Use the result from eax
        : "a"(arg0), "c"(arg1), "d"(arg2), "D"(code), [referrer]"r"(referrer)
          // This places code into edi, arg0 into eax, arg1 into ecx, and arg2 into edx
        : );  // clobber.
    // TODO: Should we clobber the other registers? EBX gets clobbered by some of the stubs,
    //       but compilation fails when declaring that.
#elif defined(__arm__)
    __asm__ __volatile__(
        "push {r1-r12, lr}\n\t"      // Save state, 13*4B = 52B
        ".cfi_adjust_cfa_offset 52\n\t"
        "push {r9}\n\t"
        ".cfi_adjust_cfa_offset 4\n\t"
        "mov r9, %[referrer]\n\t"
        "str r9, [sp, #-8]!\n\t"     // Push referrer, +8B padding so 16B aligned
        ".cfi_adjust_cfa_offset 8\n\t"
        "ldr r9, [sp, #8]\n\t"

        // Push everything on the stack, so we don't rely on the order. What a mess. :-(
        "sub sp, sp, #20\n\t"
        "str %[arg0], [sp]\n\t"
        "str %[arg1], [sp, #4]\n\t"
        "str %[arg2], [sp, #8]\n\t"
        "str %[code], [sp, #12]\n\t"
        "str %[self], [sp, #16]\n\t"
        "ldr r0, [sp]\n\t"
        "ldr r1, [sp, #4]\n\t"
        "ldr r2, [sp, #8]\n\t"
        "ldr r3, [sp, #12]\n\t"
        "ldr r9, [sp, #16]\n\t"
        "add sp, sp, #20\n\t"

        "blx r3\n\t"                 // Call the stub
        "add sp, sp, #12\n\t"        // Pop referrer and padding
        ".cfi_adjust_cfa_offset -12\n\t"
        "pop {r1-r12, lr}\n\t"       // Restore state
        ".cfi_adjust_cfa_offset -52\n\t"
        "mov %[result], r0\n\t"      // Save the result
        : [result] "=r" (result)
          // Use the result from r0
        : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
          [referrer] "r"(referrer)
        : );  // clobber.
#elif defined(__aarch64__)
    __asm__ __volatile__(
        "sub sp, sp, #48\n\t"            // Reserve stack space, 16B aligned
        ".cfi_adjust_cfa_offset 48\n\t"
        "stp %[referrer], x1, [sp]\n\t"  // referrer, x1
        "stp x2, x3, [sp, #16]\n\t"      // Save x2, x3
        "stp x18, x30, [sp, #32]\n\t"    // Save x18(xSELF), xLR

        // Push everything on the stack, so we don't rely on the order. What a mess. :-(
        "sub sp, sp, #48\n\t"
        "str %[arg0], [sp]\n\t"
        "str %[arg1], [sp, #8]\n\t"
        "str %[arg2], [sp, #16]\n\t"
        "str %[code], [sp, #24]\n\t"
        "str %[self], [sp, #32]\n\t"
        "ldr x0, [sp]\n\t"
        "ldr x1, [sp, #8]\n\t"
        "ldr x2, [sp, #16]\n\t"
        "ldr x3, [sp, #24]\n\t"
        "ldr x18, [sp, #32]\n\t"
        "add sp, sp, #48\n\t"

        "blr x3\n\t"                     // Call the stub
        "ldp x1, x2, [sp, #8]\n\t"       // Restore x1, x2
        "ldp x3, x18, [sp, #24]\n\t"     // Restore x3, xSELF
        "ldr x30, [sp, #40]\n\t"         // Restore xLR
        "add sp, sp, #48\n\t"            // Free stack space
        ".cfi_adjust_cfa_offset -48\n\t"

        "mov %[result], x0\n\t"          // Save the result
        : [result] "=r" (result)
          // Use the result from x0
        : [arg0] "0"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
          [referrer] "r"(referrer)
        : "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16",
          "x17");  // clobber.
#elif defined(__x86_64__)
    // Note: Uses the native convention
    // TODO: Set the thread?
    __asm__ __volatile__(
        "pushq %[referrer]\n\t"      // Push referrer
        "pushq (%%rsp)\n\t"          // & 16B alignment padding
        ".cfi_adjust_cfa_offset 16\n\t"
        "call *%%rax\n\t"            // Call the stub
        "addq $16, %%rsp\n\t"        // Pop referrer and padding
        ".cfi_adjust_cfa_offset -16\n\t"
        : "=a" (result)
          // Use the result from rax
        : "D"(arg0), "S"(arg1), "d"(arg2), "a"(code), [referrer] "m"(referrer)
          // This places arg0 into rdi, arg1 into rsi, arg2 into rdx, and code into rax
        : "rbx", "rcx", "rbp", "r8", "r9", "r10", "r11", "r12", "r13", "r14",
          "r15");  // clobber all
    // TODO: Should we clobber the other registers?
#else
    LOG(WARNING) << "Was asked to invoke for an architecture I do not understand.";
    result = 0;
#endif
    // Pop transition.
    self->PopManagedStackFragment(fragment);
    return result;
  }

  // Method with 32b arg0, 64b arg1.
  size_t Invoke3UWithReferrer(size_t arg0, uint64_t arg1, uintptr_t code, Thread* self,
                              mirror::ArtMethod* referrer) {
#if defined(__x86_64__) || defined(__aarch64__)
    // Just pass through.
    return Invoke3WithReferrer(arg0, arg1, 0U, code, self, referrer);
#else
    // Need to split up arguments.
    uint32_t lower = static_cast<uint32_t>(arg1 & 0xFFFFFFFF);
    uint32_t upper = static_cast<uint32_t>((arg1 >> 32) & 0xFFFFFFFF);

    return Invoke3WithReferrer(arg0, lower, upper, code, self, referrer);
#endif
  }

  // Method with 32b arg0, 32b arg1, 64b arg2.
  size_t Invoke3UUWithReferrer(uint32_t arg0, uint32_t arg1, uint64_t arg2, uintptr_t code,
                               Thread* self, mirror::ArtMethod* referrer) {
#if defined(__x86_64__) || defined(__aarch64__)
    // Just pass through.
    return Invoke3WithReferrer(arg0, arg1, arg2, code, self, referrer);
#else
    // TODO: Needs 4-param invoke.
    return 0;
#endif
  }
};


#if defined(__i386__) || defined(__x86_64__)
extern "C" void art_quick_memcpy(void);
#endif
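
// Checks art_quick_memcpy by copying a ten-word window out of the middle of one array into
// another and verifying that elements outside the window are left untouched.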
TEST_F(StubTest, Memcpy) {
#if defined(__i386__) || defined(__x86_64__)
  Thread* self = Thread::Current();

  uint32_t orig[20];
  uint32_t trg[20];
  for (size_t i = 0; i < 20; ++i) {
    orig[i] = i;
    trg[i] = 0;
  }

  Invoke3(reinterpret_cast<size_t>(&trg[4]), reinterpret_cast<size_t>(&orig[4]),
          10 * sizeof(uint32_t), reinterpret_cast<uintptr_t>(&art_quick_memcpy), self);

  EXPECT_EQ(orig[0], trg[0]);  // Element 0 was not copied, but orig[0] and trg[0] are both zero.

  for (size_t i = 1; i < 4; ++i) {
    EXPECT_NE(orig[i], trg[i]);
  }

  for (size_t i = 4; i < 14; ++i) {
    EXPECT_EQ(orig[i], trg[i]);
  }

  for (size_t i = 14; i < 20; ++i) {
    EXPECT_NE(orig[i], trg[i]);
  }

  // TODO: Test overlapping?

#else
  LOG(INFO) << "Skipping memcpy as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping memcpy as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

#if defined(__i386__) || defined(__arm__) || defined(__x86_64__)
extern "C" void art_quick_lock_object(void);
#endif
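
// Acquires the lock through the quick stub repeatedly and checks that the thin-lock recursion
// count in the lock word increments each time.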
TEST_F(StubTest, LockObject) {
#if defined(__i386__) || defined(__arm__) || defined(__x86_64__)
  static constexpr size_t kThinLockLoops = 100;

  Thread* self = Thread::Current();
  // Create an object.
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  SirtRef<mirror::String> obj(soa.Self(),
                              mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!"));
  LockWord lock = obj->GetLockWord(false);
  LockWord::LockState old_state = lock.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);

  Invoke3(reinterpret_cast<size_t>(obj.get()), 0U, 0U,
          reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);

  LockWord lock_after = obj->GetLockWord(false);
  LockWord::LockState new_state = lock_after.GetState();
  EXPECT_EQ(LockWord::LockState::kThinLocked, new_state);
  EXPECT_EQ(lock_after.ThinLockCount(), 0U);  // Thin lock starts count at zero

  for (size_t i = 1; i < kThinLockLoops; ++i) {
    Invoke3(reinterpret_cast<size_t>(obj.get()), 0U, 0U,
            reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);

    // Check we're at lock count i.
    LockWord l_inc = obj->GetLockWord(false);
    LockWord::LockState l_inc_state = l_inc.GetState();
    EXPECT_EQ(LockWord::LockState::kThinLocked, l_inc_state);
    EXPECT_EQ(l_inc.ThinLockCount(), i);
  }

  // TODO: Improve this test. Somehow force it to go to fat locked. But that needs another thread.

#else
  LOG(INFO) << "Skipping lock_object as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping lock_object as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}
class RandGen {
 public:
  explicit RandGen(uint32_t seed) : val_(seed) {}

  uint32_t next() {
    val_ = val_ * 48271 % 2147483647 + 13;
    return val_;
  }

  uint32_t val_;
};


#if defined(__i386__) || defined(__arm__) || defined(__x86_64__)
extern "C" void art_quick_lock_object(void);
extern "C" void art_quick_unlock_object(void);
#endif
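
// First checks that unlocking an unlocked object flags an illegal monitor state, then runs a
// randomized lock/unlock stress test over a pool of objects with recursive thin locks.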
TEST_F(StubTest, UnlockObject) {
#if defined(__i386__) || defined(__arm__) || defined(__x86_64__)
  static constexpr size_t kThinLockLoops = 100;

  Thread* self = Thread::Current();
  // Create an object.
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  SirtRef<mirror::String> obj(soa.Self(),
                              mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!"));
  LockWord lock = obj->GetLockWord(false);
  LockWord::LockState old_state = lock.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);

  Invoke3(reinterpret_cast<size_t>(obj.get()), 0U, 0U,
          reinterpret_cast<uintptr_t>(&art_quick_unlock_object), self);

  // This should be an illegal monitor state.
  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

  LockWord lock_after = obj->GetLockWord(false);
  LockWord::LockState new_state = lock_after.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, new_state);

  Invoke3(reinterpret_cast<size_t>(obj.get()), 0U, 0U,
          reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);

  LockWord lock_after2 = obj->GetLockWord(false);
  LockWord::LockState new_state2 = lock_after2.GetState();
  EXPECT_EQ(LockWord::LockState::kThinLocked, new_state2);

  Invoke3(reinterpret_cast<size_t>(obj.get()), 0U, 0U,
          reinterpret_cast<uintptr_t>(&art_quick_unlock_object), self);

  LockWord lock_after3 = obj->GetLockWord(false);
  LockWord::LockState new_state3 = lock_after3.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, new_state3);

  // Stress test:
  // Keep a number of objects and their locks in flight. Randomly lock or unlock one of them in
  // each step.

  RandGen r(0x1234);

  constexpr size_t kNumberOfLocks = 10;  // Number of objects (one lock per object).
  constexpr size_t kIterations = 10000;  // Number of iterations.

  size_t counts[kNumberOfLocks];
  SirtRef<mirror::String>* objects[kNumberOfLocks];

  // Initialize = allocate.
  for (size_t i = 0; i < kNumberOfLocks; ++i) {
    counts[i] = 0;
    objects[i] = new SirtRef<mirror::String>(soa.Self(),
                                             mirror::String::AllocFromModifiedUtf8(soa.Self(), ""));
  }

  for (size_t i = 0; i < kIterations; ++i) {
    // Select which lock to update.
    size_t index = r.next() % kNumberOfLocks;

    bool lock;  // Whether to lock or unlock in this step.
    if (counts[index] == 0) {
      lock = true;
    } else if (counts[index] == kThinLockLoops) {
      lock = false;
    } else {
      // Randomly.
      lock = r.next() % 2 == 0;
    }

    if (lock) {
      Invoke3(reinterpret_cast<size_t>(objects[index]->get()), 0U, 0U,
              reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);
      counts[index]++;
    } else {
      Invoke3(reinterpret_cast<size_t>(objects[index]->get()), 0U, 0U,
              reinterpret_cast<uintptr_t>(&art_quick_unlock_object), self);
      counts[index]--;
    }

    EXPECT_FALSE(self->IsExceptionPending());

    // Check the new state.
    LockWord lock_iter = objects[index]->get()->GetLockWord(false);
    LockWord::LockState iter_state = lock_iter.GetState();
    if (counts[index] > 0) {
      EXPECT_EQ(LockWord::LockState::kThinLocked, iter_state);
      EXPECT_EQ(counts[index] - 1, lock_iter.ThinLockCount());
    } else {
      EXPECT_EQ(LockWord::LockState::kUnlocked, iter_state);
    }
  }

  // Unlock the remaining count times and then check it's unlocked. Then deallocate.
  // Go reverse order to correctly handle SirtRefs.
  for (size_t i = 0; i < kNumberOfLocks; ++i) {
    size_t index = kNumberOfLocks - 1 - i;
    size_t count = counts[index];
    while (count > 0) {
      Invoke3(reinterpret_cast<size_t>(objects[index]->get()), 0U, 0U,
              reinterpret_cast<uintptr_t>(&art_quick_unlock_object), self);
      count--;
    }

    LockWord lock_after4 = objects[index]->get()->GetLockWord(false);
    LockWord::LockState new_state4 = lock_after4.GetState();
    EXPECT_EQ(LockWord::LockState::kUnlocked, new_state4);

    delete objects[index];
  }

  // TODO: Improve this test. Somehow force it to go to fat locked. But that needs another thread.

#else
  LOG(INFO) << "Skipping unlock_object as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping unlock_object as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
extern "C" void art_quick_check_cast(void);
#endif
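
// The stub receives two Class pointers; judging by the expectations below, it throws when the
// second class is not assignable to the first. String[] is assignable to Object[], but not the
// other way around.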
TEST_F(StubTest, CheckCast) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
  Thread* self = Thread::Current();
  // Find some classes.
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  SirtRef<mirror::Class> c(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
                                                                      "[Ljava/lang/Object;"));
  SirtRef<mirror::Class> c2(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
                                                                       "[Ljava/lang/String;"));

  EXPECT_FALSE(self->IsExceptionPending());

  Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(c.get()), 0U,
          reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);

  EXPECT_FALSE(self->IsExceptionPending());

  Invoke3(reinterpret_cast<size_t>(c2.get()), reinterpret_cast<size_t>(c2.get()), 0U,
          reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);

  EXPECT_FALSE(self->IsExceptionPending());

  Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(c2.get()), 0U,
          reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);

  EXPECT_FALSE(self->IsExceptionPending());

  // TODO: Make the following work. But that would require correct managed frames.

  Invoke3(reinterpret_cast<size_t>(c2.get()), reinterpret_cast<size_t>(c.get()), 0U,
          reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);

  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

#else
  LOG(INFO) << "Skipping check_cast as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping check_cast as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}
596
Andreas Gampef4e910b2014-04-29 16:55:52 -0700597#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
Andreas Gampe525cde22014-04-22 15:44:50 -0700598extern "C" void art_quick_aput_obj_with_null_and_bound_check(void);
599// Do not check non-checked ones, we'd need handlers and stuff...
600#endif
601
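
// The checked aput stub performs the null check, the bounds check, and the component-type check
// before storing; the cases below exercise each in turn.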
TEST_F(StubTest, APutObj) {
  TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();

#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
  Thread* self = Thread::Current();
  // Create an object.
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  SirtRef<mirror::Class> c(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
                                                                      "Ljava/lang/Object;"));
  SirtRef<mirror::Class> c2(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
                                                                       "Ljava/lang/String;"));
  SirtRef<mirror::Class> ca(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
                                                                       "[Ljava/lang/String;"));

  // Build a string array of size 10.
  SirtRef<mirror::ObjectArray<mirror::Object> > array(soa.Self(),
      mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), ca.get(), 10));

  // Build a string -> should be assignable.
  SirtRef<mirror::Object> str_obj(soa.Self(),
                                  mirror::String::AllocFromModifiedUtf8(soa.Self(),
                                                                        "hello, world!"));

  // Build a generic object -> should fail assigning.
  SirtRef<mirror::Object> obj_obj(soa.Self(), c->AllocObject(soa.Self()));

  // Play with it...

  // 1) Success cases
  // 1.1) Assign str_obj to array[0..3]

  EXPECT_FALSE(self->IsExceptionPending());

  Invoke3(reinterpret_cast<size_t>(array.get()), 0U, reinterpret_cast<size_t>(str_obj.get()),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(str_obj.get(), array->Get(0));

  Invoke3(reinterpret_cast<size_t>(array.get()), 1U, reinterpret_cast<size_t>(str_obj.get()),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(str_obj.get(), array->Get(1));

  Invoke3(reinterpret_cast<size_t>(array.get()), 2U, reinterpret_cast<size_t>(str_obj.get()),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(str_obj.get(), array->Get(2));

  Invoke3(reinterpret_cast<size_t>(array.get()), 3U, reinterpret_cast<size_t>(str_obj.get()),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(str_obj.get(), array->Get(3));

  // 1.2) Assign null to array[0..3]

  Invoke3(reinterpret_cast<size_t>(array.get()), 0U, reinterpret_cast<size_t>(nullptr),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(nullptr, array->Get(0));

  Invoke3(reinterpret_cast<size_t>(array.get()), 1U, reinterpret_cast<size_t>(nullptr),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(nullptr, array->Get(1));

  Invoke3(reinterpret_cast<size_t>(array.get()), 2U, reinterpret_cast<size_t>(nullptr),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(nullptr, array->Get(2));

  Invoke3(reinterpret_cast<size_t>(array.get()), 3U, reinterpret_cast<size_t>(nullptr),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(nullptr, array->Get(3));

  // TODO: Check _which_ exception is thrown. Then make 3) check that it's the right check order.

  // 2) Failure cases (str into str[])
  // 2.1) Array = null
  // TODO: Throwing NPE needs actual DEX code

//  Invoke3(reinterpret_cast<size_t>(nullptr), 0U, reinterpret_cast<size_t>(str_obj.get()),
//          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
//
//  EXPECT_TRUE(self->IsExceptionPending());
//  self->ClearException();

  // 2.2) Index < 0

  Invoke3(reinterpret_cast<size_t>(array.get()), static_cast<size_t>(-1),
          reinterpret_cast<size_t>(str_obj.get()),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

  // 2.3) Index >= length

  Invoke3(reinterpret_cast<size_t>(array.get()), 10U, reinterpret_cast<size_t>(str_obj.get()),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

  // 3) Failure cases (obj into str[])

  Invoke3(reinterpret_cast<size_t>(array.get()), 0U, reinterpret_cast<size_t>(obj_obj.get()),
          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);

  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

  // Tests done.
#else
  LOG(INFO) << "Skipping aput_obj as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping aput_obj as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
extern "C" void art_quick_alloc_object_rosalloc(void);
extern "C" void art_quick_alloc_object_resolved_rosalloc(void);
extern "C" void art_quick_alloc_object_initialized_rosalloc(void);
#endif
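
// Exercises the three RosAlloc object-allocation stubs: the plain one takes a type index plus a
// referrer method, while the resolved/initialized variants take the Class pointer directly. Also
// fills the 4MB test heap to check the out-of-memory path.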
TEST_F(StubTest, AllocObject) {
  TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();

#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
  // TODO: Check the "Unresolved" allocation stubs

  Thread* self = Thread::Current();
  // Create an object.
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  SirtRef<mirror::Class> c(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
                                                                      "Ljava/lang/Object;"));

  // Play with it...

  EXPECT_FALSE(self->IsExceptionPending());

  {
    // Use an arbitrary method from c to use as referrer.
    size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex()),    // type_idx
                            reinterpret_cast<size_t>(c->GetVirtualMethod(0)),  // arbitrary
                            0U,
                            reinterpret_cast<uintptr_t>(&art_quick_alloc_object_rosalloc),
                            self);

    EXPECT_FALSE(self->IsExceptionPending());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_EQ(c.get(), obj->GetClass());
    VerifyObject(obj);
  }

  {
    // We can use nullptr in the second argument as we do not need a method here (not used in
    // resolved/initialized cases).
    size_t result = Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(nullptr),
                            0U,
                            reinterpret_cast<uintptr_t>(&art_quick_alloc_object_resolved_rosalloc),
                            self);

    EXPECT_FALSE(self->IsExceptionPending());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_EQ(c.get(), obj->GetClass());
    VerifyObject(obj);
  }

  {
    // We can use nullptr in the second argument as we do not need a method here (not used in
    // resolved/initialized cases).
    size_t result = Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(nullptr),
                            0U,
                            reinterpret_cast<uintptr_t>(
                                &art_quick_alloc_object_initialized_rosalloc),
                            self);

    EXPECT_FALSE(self->IsExceptionPending());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_EQ(c.get(), obj->GetClass());
    VerifyObject(obj);
  }

  // Failure tests.

  // Out-of-memory.
  {
    Runtime::Current()->GetHeap()->SetIdealFootprint(1 * GB);

    // Array helps to fill memory faster.
    SirtRef<mirror::Class> ca(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
                                                                         "[Ljava/lang/Object;"));
    std::vector<SirtRef<mirror::Object>*> sirt_refs;
    // Start allocating with 128K.
    size_t length = 128 * KB / 4;
    while (length > 10) {
      SirtRef<mirror::Object>* ref = new SirtRef<mirror::Object>(
          soa.Self(),
          mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), ca.get(), length / 4));
      if (self->IsExceptionPending() || ref->get() == nullptr) {
        self->ClearException();
        delete ref;

        // Try a smaller length.
        length = length / 8;
        // Use at most half the reported free space.
        size_t mem = Runtime::Current()->GetHeap()->GetFreeMemory();
        if (length * 8 > mem) {
          length = mem / 8;
        }
      } else {
        sirt_refs.push_back(ref);
      }
    }
    LOG(INFO) << "Used " << sirt_refs.size() << " arrays to fill space.";

    // Allocate simple objects till it fails.
    while (!self->IsExceptionPending()) {
      SirtRef<mirror::Object>* ref = new SirtRef<mirror::Object>(soa.Self(),
                                                                 c->AllocObject(soa.Self()));
      if (!self->IsExceptionPending() && ref->get() != nullptr) {
        sirt_refs.push_back(ref);
      } else {
        delete ref;
      }
    }
    self->ClearException();

    size_t result = Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(nullptr),
                            0U,
                            reinterpret_cast<uintptr_t>(
                                &art_quick_alloc_object_initialized_rosalloc),
                            self);

    EXPECT_TRUE(self->IsExceptionPending());
    self->ClearException();
    EXPECT_EQ(reinterpret_cast<size_t>(nullptr), result);

    // Release all the allocated objects.
    // Need to go backward to release SirtRef in the right order.
    auto it = sirt_refs.rbegin();
    auto end = sirt_refs.rend();
    for (; it != end; ++it) {
      delete *it;
    }
  }

  // Tests done.
#else
  LOG(INFO) << "Skipping alloc_object as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping alloc_object as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
extern "C" void art_quick_alloc_array_rosalloc(void);
extern "C" void art_quick_alloc_array_resolved_rosalloc(void);
#endif
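
// Same idea for the array-allocation stubs; here the third stub argument carries the element
// count.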
TEST_F(StubTest, AllocObjectArray) {
  TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();

#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
  // TODO: Check the "Unresolved" allocation stubs

  Thread* self = Thread::Current();
  // Create an object.
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  SirtRef<mirror::Class> c(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
                                                                      "[Ljava/lang/Object;"));

  // Needed to have a linked method.
  SirtRef<mirror::Class> c_obj(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
                                                                          "Ljava/lang/Object;"));

  // Play with it...

  EXPECT_FALSE(self->IsExceptionPending());

  // For some reason this does not work, as the type_idx is artificial and outside what the
  // resolved types of c_obj allow...

  if (false) {
    // Use an arbitrary method from c to use as referrer.
    size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex()),    // type_idx
                            reinterpret_cast<size_t>(c_obj->GetVirtualMethod(0)),  // arbitrary
                            10U,
                            reinterpret_cast<uintptr_t>(&art_quick_alloc_array_rosalloc),
                            self);

    EXPECT_FALSE(self->IsExceptionPending());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    mirror::Array* obj = reinterpret_cast<mirror::Array*>(result);
    EXPECT_EQ(c.get(), obj->GetClass());
    VerifyObject(obj);
    EXPECT_EQ(obj->GetLength(), 10);
  }

  {
    // We can use nullptr in the second argument as we do not need a method here (not used in
    // resolved/initialized cases).
    size_t result = Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(nullptr),
                            10U,
                            reinterpret_cast<uintptr_t>(&art_quick_alloc_array_resolved_rosalloc),
                            self);

    EXPECT_FALSE(self->IsExceptionPending()) << PrettyTypeOf(self->GetException(nullptr));
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_TRUE(obj->IsArrayInstance());
    EXPECT_TRUE(obj->IsObjectArray());
    EXPECT_EQ(c.get(), obj->GetClass());
    VerifyObject(obj);
    mirror::Array* array = reinterpret_cast<mirror::Array*>(result);
    EXPECT_EQ(array->GetLength(), 10);
  }

  // Failure tests.

  // Out-of-memory.
  {
    size_t result = Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(nullptr),
                            GB,  // that should fail...
                            reinterpret_cast<uintptr_t>(&art_quick_alloc_array_resolved_rosalloc),
                            self);

    EXPECT_TRUE(self->IsExceptionPending());
    self->ClearException();
    EXPECT_EQ(reinterpret_cast<size_t>(nullptr), result);
  }

  // Tests done.
#else
  LOG(INFO) << "Skipping alloc_array as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping alloc_array as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
extern "C" void art_quick_string_compareto(void);
#endif
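
// Compares every pair from a set of test strings, where the second half of the set has had its
// offset/count fields rewritten to simulate substrings, and checks the sign of the stub's result
// against String::CompareTo.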
TEST_F(StubTest, StringCompareTo) {
  TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();

#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  // Create some strings.
  // Use an array so we can index into it and use a matrix for expected results.
  // Setup: The first half is standard. The second half uses a non-zero offset.
  // TODO: Shared backing arrays.
  constexpr size_t base_string_count = 7;
  const char* c[base_string_count] = { "", "", "a", "aa", "ab", "aac", "aac" };

  constexpr size_t string_count = 2 * base_string_count;

  SirtRef<mirror::String>* s[string_count];

  for (size_t i = 0; i < base_string_count; ++i) {
    s[i] = new SirtRef<mirror::String>(soa.Self(),
                                       mirror::String::AllocFromModifiedUtf8(soa.Self(), c[i]));
  }

  RandGen r(0x1234);

  for (size_t i = base_string_count; i < string_count; ++i) {
    s[i] = new SirtRef<mirror::String>(
        soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(), c[i - base_string_count]));
    int32_t length = s[i]->get()->GetLength();
    if (length > 1) {
      // Set a random offset and length.
      int32_t new_offset = 1 + (r.next() % (length - 1));
      int32_t rest = length - new_offset - 1;
      int32_t new_length = 1 + (rest > 0 ? r.next() % rest : 0);

      s[i]->get()->SetField32<false>(mirror::String::CountOffset(), new_length);
      s[i]->get()->SetField32<false>(mirror::String::OffsetOffset(), new_offset);
    }
  }

  // TODO: wide characters

  // Matrix of expectations. First component is first parameter. Note we only check against the
  // sign, not the value. As we are testing random offsets, we need to compute this and need to
  // rely on String::CompareTo being correct.
  int32_t expected[string_count][string_count];
  for (size_t x = 0; x < string_count; ++x) {
    for (size_t y = 0; y < string_count; ++y) {
      expected[x][y] = s[x]->get()->CompareTo(s[y]->get());
    }
  }

  // Play with it...

  for (size_t x = 0; x < string_count; ++x) {
    for (size_t y = 0; y < string_count; ++y) {
      // Test string_compareto x y
      size_t result = Invoke3(reinterpret_cast<size_t>(s[x]->get()),
                              reinterpret_cast<size_t>(s[y]->get()), 0U,
                              reinterpret_cast<uintptr_t>(&art_quick_string_compareto), self);

      EXPECT_FALSE(self->IsExceptionPending());

      // The result is a 32b signed integer; reinterpret the word-sized return value accordingly.
      union {
        size_t r;
        int32_t i;
      } conv;
      conv.r = result;
      int32_t e = expected[x][y];
      // Note: index the source-text array modulo base_string_count for the failure messages; the
      // second half of s[] reuses the first half's characters, and c[] only has
      // base_string_count entries.
      EXPECT_TRUE(e == 0 ? conv.i == 0 : true) << "x=" << c[x % base_string_count] << " y="
          << c[y % base_string_count] << " res=" << conv.r;
      EXPECT_TRUE(e < 0 ? conv.i < 0 : true) << "x=" << c[x % base_string_count] << " y="
          << c[y % base_string_count] << " res=" << conv.r;
      EXPECT_TRUE(e > 0 ? conv.i > 0 : true) << "x=" << c[x % base_string_count] << " y="
          << c[y % base_string_count] << " res=" << conv.r;
    }
  }

  // TODO: Deallocate things.

  // Tests done.
#else
  LOG(INFO) << "Skipping string_compareto as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping string_compareto as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
extern "C" void art_quick_set32_static(void);
extern "C" void art_quick_get32_static(void);
#endif
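
// Each GetSet helper below writes a series of test values through the quick "set" stub and reads
// them back through the quick "get" stub (and, for instance fields, directly through the
// ArtField) so that both directions are cross-checked.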
static void GetSet32Static(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f,
                           Thread* self, mirror::ArtMethod* referrer, StubTest* test)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
  constexpr size_t num_values = 7;
  uint32_t values[num_values] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };

  for (size_t i = 0; i < num_values; ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                              static_cast<size_t>(values[i]),
                              0U,
                              reinterpret_cast<uintptr_t>(&art_quick_set32_static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                                           0U, 0U,
                                           reinterpret_cast<uintptr_t>(&art_quick_get32_static),
                                           self,
                                           referrer);

    EXPECT_EQ(res, values[i]) << "Iteration " << i;
  }
#else
  LOG(INFO) << "Skipping set32static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set32static as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
extern "C" void art_quick_set32_instance(void);
extern "C" void art_quick_get32_instance(void);
#endif

static void GetSet32Instance(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f,
                             Thread* self, mirror::ArtMethod* referrer, StubTest* test)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
  constexpr size_t num_values = 7;
  uint32_t values[num_values] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };

  for (size_t i = 0; i < num_values; ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->get()),
                              static_cast<size_t>(values[i]),
                              reinterpret_cast<uintptr_t>(&art_quick_set32_instance),
                              self,
                              referrer);

    int32_t res = f->get()->GetInt(obj->get());
    EXPECT_EQ(res, static_cast<int32_t>(values[i])) << "Iteration " << i;

    res++;
    f->get()->SetInt<false>(obj->get(), res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->get()),
                                            0U,
                                            reinterpret_cast<uintptr_t>(&art_quick_get32_instance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<int32_t>(res2));
  }
#else
  LOG(INFO) << "Skipping set32instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set32instance as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
extern "C" void art_quick_set_obj_static(void);
extern "C" void art_quick_get_obj_static(void);

static void set_and_check_static(uint32_t f_idx, mirror::Object* val, Thread* self,
                                 mirror::ArtMethod* referrer, StubTest* test)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
                            reinterpret_cast<size_t>(val),
                            0U,
                            reinterpret_cast<uintptr_t>(&art_quick_set_obj_static),
                            self,
                            referrer);

  size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
                                         0U, 0U,
                                         reinterpret_cast<uintptr_t>(&art_quick_get_obj_static),
                                         self,
                                         referrer);

  EXPECT_EQ(res, reinterpret_cast<size_t>(val)) << "Value " << val;
}
#endif

static void GetSetObjStatic(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f,
                            Thread* self, mirror::ArtMethod* referrer, StubTest* test)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
  set_and_check_static((*f)->GetDexFieldIndex(), nullptr, self, referrer, test);

  // Allocate a string object for simplicity.
  mirror::String* str = mirror::String::AllocFromModifiedUtf8(self, "Test");
  set_and_check_static((*f)->GetDexFieldIndex(), str, self, referrer, test);

  set_and_check_static((*f)->GetDexFieldIndex(), nullptr, self, referrer, test);
#else
  LOG(INFO) << "Skipping setObjstatic as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping setObjstatic as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}


#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
extern "C" void art_quick_set_obj_instance(void);
extern "C" void art_quick_get_obj_instance(void);

static void set_and_check_instance(SirtRef<mirror::ArtField>* f, mirror::Object* trg,
                                   mirror::Object* val, Thread* self, mirror::ArtMethod* referrer,
                                   StubTest* test)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                            reinterpret_cast<size_t>(trg),
                            reinterpret_cast<size_t>(val),
                            reinterpret_cast<uintptr_t>(&art_quick_set_obj_instance),
                            self,
                            referrer);

  size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                                         reinterpret_cast<size_t>(trg),
                                         0U,
                                         reinterpret_cast<uintptr_t>(&art_quick_get_obj_instance),
                                         self,
                                         referrer);

  EXPECT_EQ(res, reinterpret_cast<size_t>(val)) << "Value " << val;

  EXPECT_EQ(val, f->get()->GetObj(trg));
}
#endif

static void GetSetObjInstance(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f,
                              Thread* self, mirror::ArtMethod* referrer, StubTest* test)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
  set_and_check_instance(f, obj->get(), nullptr, self, referrer, test);

  // Allocate a string object for simplicity.
  mirror::String* str = mirror::String::AllocFromModifiedUtf8(self, "Test");
  set_and_check_instance(f, obj->get(), str, self, referrer, test);

  set_and_check_instance(f, obj->get(), nullptr, self, referrer, test);
#else
  LOG(INFO) << "Skipping setObjinstance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping setObjinstance as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}


// TODO: Complete these tests for 32b architectures.

#if defined(__x86_64__) || defined(__aarch64__)
extern "C" void art_quick_set64_static(void);
extern "C" void art_quick_get64_static(void);
#endif

static void GetSet64Static(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f,
                           Thread* self, mirror::ArtMethod* referrer, StubTest* test)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__x86_64__) || defined(__aarch64__)
  constexpr size_t num_values = 8;
  uint64_t values[num_values] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };

  for (size_t i = 0; i < num_values; ++i) {
    test->Invoke3UWithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                               values[i],
                               reinterpret_cast<uintptr_t>(&art_quick_set64_static),
                               self,
                               referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                                           0U, 0U,
                                           reinterpret_cast<uintptr_t>(&art_quick_get64_static),
                                           self,
                                           referrer);

    EXPECT_EQ(res, values[i]) << "Iteration " << i;
  }
#else
  LOG(INFO) << "Skipping set64static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set64static as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}


#if defined(__x86_64__) || defined(__aarch64__)
extern "C" void art_quick_set64_instance(void);
extern "C" void art_quick_get64_instance(void);
#endif

static void GetSet64Instance(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f,
                             Thread* self, mirror::ArtMethod* referrer, StubTest* test)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__x86_64__) || defined(__aarch64__)
  constexpr size_t num_values = 8;
  uint64_t values[num_values] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };

  for (size_t i = 0; i < num_values; ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->get()),
                              static_cast<size_t>(values[i]),
                              reinterpret_cast<uintptr_t>(&art_quick_set64_instance),
                              self,
                              referrer);

    int64_t res = f->get()->GetLong(obj->get());
    EXPECT_EQ(res, static_cast<int64_t>(values[i])) << "Iteration " << i;

    res++;
    f->get()->SetLong<false>(obj->get(), res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->get()),
                                            0U,
                                            reinterpret_cast<uintptr_t>(&art_quick_get64_instance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<int64_t>(res2));
  }
#else
  LOG(INFO) << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}
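
// Loads the AllFields test class, then walks its static and instance fields and dispatches each
// field of the requested primitive type to the matching GetSet helper above.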
static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type) {
  // garbage is created during ClassLinker::Init

  JNIEnv* env = Thread::Current()->GetJniEnv();
  jclass jc = env->FindClass("AllFields");
  CHECK(jc != NULL);
  jobject o = env->AllocObject(jc);
  CHECK(o != NULL);

  ScopedObjectAccess soa(self);
  SirtRef<mirror::Object> obj(self, soa.Decode<mirror::Object*>(o));

  SirtRef<mirror::Class> c(self, obj->GetClass());

  // Need a method as a referrer.
  SirtRef<mirror::ArtMethod> m(self, c->GetDirectMethod(0));

  // Play with it...

  // Static fields.
  {
    SirtRef<mirror::ObjectArray<mirror::ArtField>> fields(self, c.get()->GetSFields());
    int32_t num_fields = fields->GetLength();
    for (int32_t i = 0; i < num_fields; ++i) {
      SirtRef<mirror::ArtField> f(self, fields->Get(i));

      FieldHelper fh(f.get());
      Primitive::Type type = fh.GetTypeAsPrimitiveType();
      switch (type) {
        case Primitive::Type::kPrimInt:
          if (test_type == type) {
            GetSet32Static(&obj, &f, self, m.get(), test);
          }
          break;

        case Primitive::Type::kPrimLong:
          if (test_type == type) {
            GetSet64Static(&obj, &f, self, m.get(), test);
          }
          break;

        case Primitive::Type::kPrimNot:
          // Don't try array.
          if (test_type == type && fh.GetTypeDescriptor()[0] != '[') {
            GetSetObjStatic(&obj, &f, self, m.get(), test);
          }
          break;

        default:
          break;  // Skip.
      }
    }
  }

  // Instance fields.
  {
    SirtRef<mirror::ObjectArray<mirror::ArtField>> fields(self, c.get()->GetIFields());
    int32_t num_fields = fields->GetLength();
    for (int32_t i = 0; i < num_fields; ++i) {
      SirtRef<mirror::ArtField> f(self, fields->Get(i));

      FieldHelper fh(f.get());
      Primitive::Type type = fh.GetTypeAsPrimitiveType();
      switch (type) {
        case Primitive::Type::kPrimInt:
          if (test_type == type) {
            GetSet32Instance(&obj, &f, self, m.get(), test);
          }
          break;

        case Primitive::Type::kPrimLong:
          if (test_type == type) {
            GetSet64Instance(&obj, &f, self, m.get(), test);
          }
          break;

        case Primitive::Type::kPrimNot:
          // Don't try array.
          if (test_type == type && fh.GetTypeDescriptor()[0] != '[') {
            GetSetObjInstance(&obj, &f, self, m.get(), test);
          }
          break;

        default:
          break;  // Skip.
      }
    }
  }

  // TODO: Deallocate things.
}


TEST_F(StubTest, Fields32) {
  TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();

  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimInt);
}

TEST_F(StubTest, FieldsObj) {
  TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();

  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimNot);
}

TEST_F(StubTest, Fields64) {
  TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();

  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimLong);
}

}  // namespace art