/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <dirent.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include <cctype>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <map>

#include "gtest/gtest.h"
#include "utils/arm/assembler_thumb2.h"
#include "base/hex_dump.h"
#include "common_runtime_test.h"

namespace art {
namespace arm {

// Include results file (generated manually).
#include "assembler_thumb_test_expected.cc.inc"

#ifndef HAVE_ANDROID_OS
// This controls whether the results are printed to the
// screen or compared against the expected output.
// To generate new expected output, set this to true and
// copy the output into the .cc.inc file in the form
// of the other results.
//
// When this is false, the results are not printed to the
// output, but are compared against the expected results
// in the .cc.inc file.
static constexpr bool kPrintResults = false;
#endif
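
// For reference, a sketch (not the actual generated contents) of the shape an
// entry in assembler_thumb_test_expected.cc.inc is assumed to take, based on
// how test_results is consumed in dump() below: a nullptr-terminated array of
// expected disassembly lines per test, keyed by test name. The real entries
// are produced by running with kPrintResults = true and copying the output.
//
//   const char* SimpleMovResults[] = {
//     "   0:   4608        mov r0, r1\n",
//     ...
//     nullptr
//   };
//   // In setup_results():
//   //   test_results["SimpleMov"] = SimpleMovResults;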

static const char* TOOL_PREFIX = "arm-linux-androideabi-";

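// Make sure ANDROID_DATA is set, defaulting to /tmp, so that the test has
// somewhere to create its temporary files.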
void SetAndroidData() {
  const char* data = getenv("ANDROID_DATA");
  if (data == nullptr) {
    setenv("ANDROID_DATA", "/tmp", 1);
  }
}

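// Compare two strings, skipping over runs of whitespace in both.
// Returns 0 if they match, non-zero otherwise (strcmp-style).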
int CompareIgnoringSpace(const char* s1, const char* s2) {
  while (*s1 != '\0') {
    while (isspace(*s1)) ++s1;
    while (isspace(*s2)) ++s2;
    if (*s1 == '\0' || *s1 != *s2) {
      break;
    }
    ++s1;
    ++s2;
  }
  return *s1 - *s2;
}

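// Locate the newest arm-linux-androideabi-* toolchain by walking up from
// ANDROID_BUILD_TOP (or the current directory) until a prebuilts/ directory
// is found, then scanning prebuilts/gcc/linux-x86/arm. Returns the
// toolchain's bin/ directory, or "" to fall back to the tools on PATH.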
std::string GetAndroidToolsDir() {
  std::string root;
  const char* android_build_top = getenv("ANDROID_BUILD_TOP");
  if (android_build_top != nullptr) {
    root += android_build_top;
  } else {
    // Not set by build server, so default to current directory.
    char* cwd = getcwd(nullptr, 0);
    setenv("ANDROID_BUILD_TOP", cwd, 1);
    root += cwd;
    free(cwd);
  }

  // Look for "prebuilts".
  std::string toolsdir = root;
  struct stat st;
  while (toolsdir != "") {
    std::string prebuilts = toolsdir + "/prebuilts";
    if (stat(prebuilts.c_str(), &st) == 0) {
      // Found prebuilts.
      toolsdir += "/prebuilts/gcc/linux-x86/arm";
      break;
    }
    // Not present, move up one dir.
    size_t slash = toolsdir.rfind('/');
    if (slash == std::string::npos) {
      toolsdir = "";
    } else {
      toolsdir = toolsdir.substr(0, slash);
    }
  }
  bool statok = stat(toolsdir.c_str(), &st) == 0;
  if (!statok) {
    return "";  // Use the tools on PATH.
  }

  DIR* dir = opendir(toolsdir.c_str());
  if (dir == nullptr) {
    return "";  // Use the tools on PATH.
  }

  struct dirent* entry;
  std::string founddir;
  double maxversion = 0;

  // Find the latest version of the tools (biggest version number).
  // The subdirectory name will be something like "arm-linux-androideabi-4.8".
  while ((entry = readdir(dir)) != nullptr) {
    std::string subdir = toolsdir + std::string("/") + std::string(entry->d_name);
    size_t eabi = subdir.find(TOOL_PREFIX);
    if (eabi != std::string::npos) {
      std::string suffix = subdir.substr(eabi + strlen(TOOL_PREFIX));
      double version = strtod(suffix.c_str(), nullptr);
      if (version > maxversion) {
        maxversion = version;
        founddir = subdir;
      }
    }
  }
  closedir(dir);
  bool found = founddir != "";
  if (!found) {
    return "";  // Use the tools on PATH.
  }

  return founddir + "/bin/";
}

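// Write the generated code out as a .S file, assemble it with the prebuilt
// toolchain, disassemble it with objdump, and either print the disassembly
// (when kPrintResults is set) or compare it, ignoring whitespace, against the
// expected output recorded for testname in assembler_thumb_test_expected.cc.inc.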
void dump(std::vector<uint8_t>& code, const char* testname) {
  // This will only work on the host.  There is no as, objcopy or objdump on the
  // device.
#ifndef HAVE_ANDROID_OS
  static bool results_ok = false;
  static std::string toolsdir;

  if (!results_ok) {
    setup_results();
    toolsdir = GetAndroidToolsDir();
    SetAndroidData();
    results_ok = true;
  }

  ScratchFile file;

  const char* filename = file.GetFilename().c_str();

  std::ofstream out(filename);
  if (out) {
    out << ".section \".text\"\n";
    out << ".syntax unified\n";
    out << ".arch armv7-a\n";
    out << ".thumb\n";
    out << ".thumb_func\n";
    out << ".type " << testname << ", #function\n";
    out << ".global " << testname << "\n";
    out << testname << ":\n";
    out << ".fnstart\n";

    for (uint32_t i = 0; i < code.size(); ++i) {
      out << ".byte " << (static_cast<int>(code[i]) & 0xff) << "\n";
    }
    out << ".fnend\n";
    out << ".size " << testname << ", .-" << testname << "\n";
  }
  out.close();

  char cmd[1024];

  // Assemble the .S file.
  snprintf(cmd, sizeof(cmd), "%s%sas %s -o %s.o", toolsdir.c_str(), TOOL_PREFIX, filename, filename);
  system(cmd);

  // Remove the $d symbols to prevent the disassembler from dumping the
  // instructions as .word.
  snprintf(cmd, sizeof(cmd), "%s%sobjcopy -N '$d' %s.o %s.oo", toolsdir.c_str(), TOOL_PREFIX,
           filename, filename);
  system(cmd);

  // Disassemble.

  snprintf(cmd, sizeof(cmd), "%s%sobjdump -d %s.oo | grep '^ *[0-9a-f][0-9a-f]*:'",
           toolsdir.c_str(), TOOL_PREFIX, filename);
  if (kPrintResults) {
    // Print the results only, don't check.  This is used to generate new output for inserting
    // into the .inc file.
    system(cmd);
  } else {
    // Check that the results match the appropriate results in the .inc file.
    FILE* fp = popen(cmd, "r");
    ASSERT_TRUE(fp != nullptr);

    std::map<std::string, const char**>::iterator results = test_results.find(testname);
    ASSERT_NE(results, test_results.end());

    uint32_t lineindex = 0;

    while (!feof(fp)) {
      char testline[256];
      char* s = fgets(testline, sizeof(testline), fp);
      if (s == nullptr) {
        break;
      }
      if (CompareIgnoringSpace(results->second[lineindex], testline) != 0) {
        LOG(FATAL) << "Output is not as expected at line: " << lineindex
                   << ": " << results->second[lineindex] << " vs " << testline;
      }
      ++lineindex;
    }
    // Check that we are at the end.
    ASSERT_TRUE(results->second[lineindex] == nullptr);
    pclose(fp);
  }

  char buf[FILENAME_MAX];
  snprintf(buf, sizeof(buf), "%s.o", filename);
  unlink(buf);

  snprintf(buf, sizeof(buf), "%s.oo", filename);
  unlink(buf);
#endif
}

230#define __ assembler->
231
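// Each test below follows the same pattern: create a Thumb2 assembler, emit a
// group of instructions through it, finalize them into a byte buffer, and hand
// the buffer to dump() under the test's name for printing or comparison.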
TEST(Thumb2AssemblerTest, SimpleMov) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ mov(R0, ShifterOperand(R1));
  __ mov(R8, ShifterOperand(R9));

  __ mov(R0, ShifterOperand(1));
  __ mov(R8, ShifterOperand(9));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "SimpleMov");
  delete assembler;
}

TEST(Thumb2AssemblerTest, SimpleMov32) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
  assembler->Force32Bit();

  __ mov(R0, ShifterOperand(R1));
  __ mov(R8, ShifterOperand(R9));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "SimpleMov32");
  delete assembler;
}

TEST(Thumb2AssemblerTest, SimpleMovAdd) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ mov(R0, ShifterOperand(R1));
  __ add(R0, R1, ShifterOperand(R2));
  __ add(R0, R1, ShifterOperand());

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "SimpleMovAdd");
  delete assembler;
}

TEST(Thumb2AssemblerTest, DataProcessingRegister) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ mov(R0, ShifterOperand(R1));
  __ mvn(R0, ShifterOperand(R1));

  // 32 bit variants.
  __ add(R0, R1, ShifterOperand(R2));
  __ sub(R0, R1, ShifterOperand(R2));
  __ and_(R0, R1, ShifterOperand(R2));
  __ orr(R0, R1, ShifterOperand(R2));
  __ eor(R0, R1, ShifterOperand(R2));
  __ bic(R0, R1, ShifterOperand(R2));
  __ adc(R0, R1, ShifterOperand(R2));
  __ sbc(R0, R1, ShifterOperand(R2));
  __ rsb(R0, R1, ShifterOperand(R2));

  // 16 bit variants.
  __ add(R0, R1, ShifterOperand());
  __ sub(R0, R1, ShifterOperand());
  __ and_(R0, R1, ShifterOperand());
  __ orr(R0, R1, ShifterOperand());
  __ eor(R0, R1, ShifterOperand());
  __ bic(R0, R1, ShifterOperand());
  __ adc(R0, R1, ShifterOperand());
  __ sbc(R0, R1, ShifterOperand());
  __ rsb(R0, R1, ShifterOperand());

  __ tst(R0, ShifterOperand(R1));
  __ teq(R0, ShifterOperand(R1));
  __ cmp(R0, ShifterOperand(R1));
  __ cmn(R0, ShifterOperand(R1));

  __ movs(R0, ShifterOperand(R1));
  __ mvns(R0, ShifterOperand(R1));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "DataProcessingRegister");
  delete assembler;
}

TEST(Thumb2AssemblerTest, DataProcessingImmediate) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ mov(R0, ShifterOperand(0x55));
  __ mvn(R0, ShifterOperand(0x55));
  __ add(R0, R1, ShifterOperand(0x55));
  __ sub(R0, R1, ShifterOperand(0x55));
  __ and_(R0, R1, ShifterOperand(0x55));
  __ orr(R0, R1, ShifterOperand(0x55));
  __ eor(R0, R1, ShifterOperand(0x55));
  __ bic(R0, R1, ShifterOperand(0x55));
  __ adc(R0, R1, ShifterOperand(0x55));
  __ sbc(R0, R1, ShifterOperand(0x55));
  __ rsb(R0, R1, ShifterOperand(0x55));

  __ tst(R0, ShifterOperand(0x55));
  __ teq(R0, ShifterOperand(0x55));
  __ cmp(R0, ShifterOperand(0x55));
  __ cmn(R0, ShifterOperand(0x55));

  __ add(R0, R1, ShifterOperand(5));
  __ sub(R0, R1, ShifterOperand(5));

  __ movs(R0, ShifterOperand(0x55));
  __ mvns(R0, ShifterOperand(0x55));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "DataProcessingImmediate");
  delete assembler;
}

TEST(Thumb2AssemblerTest, DataProcessingModifiedImmediate) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ mov(R0, ShifterOperand(0x550055));
  __ mvn(R0, ShifterOperand(0x550055));
  __ add(R0, R1, ShifterOperand(0x550055));
  __ sub(R0, R1, ShifterOperand(0x550055));
  __ and_(R0, R1, ShifterOperand(0x550055));
  __ orr(R0, R1, ShifterOperand(0x550055));
  __ eor(R0, R1, ShifterOperand(0x550055));
  __ bic(R0, R1, ShifterOperand(0x550055));
  __ adc(R0, R1, ShifterOperand(0x550055));
  __ sbc(R0, R1, ShifterOperand(0x550055));
  __ rsb(R0, R1, ShifterOperand(0x550055));

  __ tst(R0, ShifterOperand(0x550055));
  __ teq(R0, ShifterOperand(0x550055));
  __ cmp(R0, ShifterOperand(0x550055));
  __ cmn(R0, ShifterOperand(0x550055));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "DataProcessingModifiedImmediate");
  delete assembler;
}


TEST(Thumb2AssemblerTest, DataProcessingModifiedImmediates) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ mov(R0, ShifterOperand(0x550055));
  __ mov(R0, ShifterOperand(0x55005500));
  __ mov(R0, ShifterOperand(0x55555555));
  __ mov(R0, ShifterOperand(0xd5000000));  // rotated to first position
  __ mov(R0, ShifterOperand(0x6a000000));  // rotated to second position
  __ mov(R0, ShifterOperand(0x350));       // rotated to 2nd last position
  __ mov(R0, ShifterOperand(0x1a8));       // rotated to last position

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "DataProcessingModifiedImmediates");
  delete assembler;
}

TEST(Thumb2AssemblerTest, DataProcessingShiftedRegister) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ mov(R3, ShifterOperand(R4, LSL, 4));
  __ mov(R3, ShifterOperand(R4, LSR, 5));
  __ mov(R3, ShifterOperand(R4, ASR, 6));
  __ mov(R3, ShifterOperand(R4, ROR, 7));
  __ mov(R3, ShifterOperand(R4, ROR));

  // 32 bit variants.
  __ mov(R8, ShifterOperand(R4, LSL, 4));
  __ mov(R8, ShifterOperand(R4, LSR, 5));
  __ mov(R8, ShifterOperand(R4, ASR, 6));
  __ mov(R8, ShifterOperand(R4, ROR, 7));
  __ mov(R8, ShifterOperand(R4, RRX));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "DataProcessingShiftedRegister");
  delete assembler;
}


TEST(Thumb2AssemblerTest, BasicLoad) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ ldr(R3, Address(R4, 24));
  __ ldrb(R3, Address(R4, 24));
  __ ldrh(R3, Address(R4, 24));
  __ ldrsb(R3, Address(R4, 24));
  __ ldrsh(R3, Address(R4, 24));

  __ ldr(R3, Address(SP, 24));

  // 32 bit variants.
  __ ldr(R8, Address(R4, 24));
  __ ldrb(R8, Address(R4, 24));
  __ ldrh(R8, Address(R4, 24));
  __ ldrsb(R8, Address(R4, 24));
  __ ldrsh(R8, Address(R4, 24));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "BasicLoad");
  delete assembler;
}


TEST(Thumb2AssemblerTest, BasicStore) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ str(R3, Address(R4, 24));
  __ strb(R3, Address(R4, 24));
  __ strh(R3, Address(R4, 24));

  __ str(R3, Address(SP, 24));

  // 32 bit variants.
  __ str(R8, Address(R4, 24));
  __ strb(R8, Address(R4, 24));
  __ strh(R8, Address(R4, 24));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "BasicStore");
  delete assembler;
}

TEST(Thumb2AssemblerTest, ComplexLoad) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ ldr(R3, Address(R4, 24, Address::Mode::Offset));
  __ ldr(R3, Address(R4, 24, Address::Mode::PreIndex));
  __ ldr(R3, Address(R4, 24, Address::Mode::PostIndex));
  __ ldr(R3, Address(R4, 24, Address::Mode::NegOffset));
  __ ldr(R3, Address(R4, 24, Address::Mode::NegPreIndex));
  __ ldr(R3, Address(R4, 24, Address::Mode::NegPostIndex));

  __ ldrb(R3, Address(R4, 24, Address::Mode::Offset));
  __ ldrb(R3, Address(R4, 24, Address::Mode::PreIndex));
  __ ldrb(R3, Address(R4, 24, Address::Mode::PostIndex));
  __ ldrb(R3, Address(R4, 24, Address::Mode::NegOffset));
  __ ldrb(R3, Address(R4, 24, Address::Mode::NegPreIndex));
  __ ldrb(R3, Address(R4, 24, Address::Mode::NegPostIndex));

  __ ldrh(R3, Address(R4, 24, Address::Mode::Offset));
  __ ldrh(R3, Address(R4, 24, Address::Mode::PreIndex));
  __ ldrh(R3, Address(R4, 24, Address::Mode::PostIndex));
  __ ldrh(R3, Address(R4, 24, Address::Mode::NegOffset));
  __ ldrh(R3, Address(R4, 24, Address::Mode::NegPreIndex));
  __ ldrh(R3, Address(R4, 24, Address::Mode::NegPostIndex));

  __ ldrsb(R3, Address(R4, 24, Address::Mode::Offset));
  __ ldrsb(R3, Address(R4, 24, Address::Mode::PreIndex));
  __ ldrsb(R3, Address(R4, 24, Address::Mode::PostIndex));
  __ ldrsb(R3, Address(R4, 24, Address::Mode::NegOffset));
  __ ldrsb(R3, Address(R4, 24, Address::Mode::NegPreIndex));
  __ ldrsb(R3, Address(R4, 24, Address::Mode::NegPostIndex));

  __ ldrsh(R3, Address(R4, 24, Address::Mode::Offset));
  __ ldrsh(R3, Address(R4, 24, Address::Mode::PreIndex));
  __ ldrsh(R3, Address(R4, 24, Address::Mode::PostIndex));
  __ ldrsh(R3, Address(R4, 24, Address::Mode::NegOffset));
  __ ldrsh(R3, Address(R4, 24, Address::Mode::NegPreIndex));
  __ ldrsh(R3, Address(R4, 24, Address::Mode::NegPostIndex));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "ComplexLoad");
  delete assembler;
}


TEST(Thumb2AssemblerTest, ComplexStore) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ str(R3, Address(R4, 24, Address::Mode::Offset));
  __ str(R3, Address(R4, 24, Address::Mode::PreIndex));
  __ str(R3, Address(R4, 24, Address::Mode::PostIndex));
  __ str(R3, Address(R4, 24, Address::Mode::NegOffset));
  __ str(R3, Address(R4, 24, Address::Mode::NegPreIndex));
  __ str(R3, Address(R4, 24, Address::Mode::NegPostIndex));

  __ strb(R3, Address(R4, 24, Address::Mode::Offset));
  __ strb(R3, Address(R4, 24, Address::Mode::PreIndex));
  __ strb(R3, Address(R4, 24, Address::Mode::PostIndex));
  __ strb(R3, Address(R4, 24, Address::Mode::NegOffset));
  __ strb(R3, Address(R4, 24, Address::Mode::NegPreIndex));
  __ strb(R3, Address(R4, 24, Address::Mode::NegPostIndex));

  __ strh(R3, Address(R4, 24, Address::Mode::Offset));
  __ strh(R3, Address(R4, 24, Address::Mode::PreIndex));
  __ strh(R3, Address(R4, 24, Address::Mode::PostIndex));
  __ strh(R3, Address(R4, 24, Address::Mode::NegOffset));
  __ strh(R3, Address(R4, 24, Address::Mode::NegPreIndex));
  __ strh(R3, Address(R4, 24, Address::Mode::NegPostIndex));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "ComplexStore");
  delete assembler;
}

TEST(Thumb2AssemblerTest, NegativeLoadStore) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ ldr(R3, Address(R4, -24, Address::Mode::Offset));
  __ ldr(R3, Address(R4, -24, Address::Mode::PreIndex));
  __ ldr(R3, Address(R4, -24, Address::Mode::PostIndex));
  __ ldr(R3, Address(R4, -24, Address::Mode::NegOffset));
  __ ldr(R3, Address(R4, -24, Address::Mode::NegPreIndex));
  __ ldr(R3, Address(R4, -24, Address::Mode::NegPostIndex));

  __ ldrb(R3, Address(R4, -24, Address::Mode::Offset));
  __ ldrb(R3, Address(R4, -24, Address::Mode::PreIndex));
  __ ldrb(R3, Address(R4, -24, Address::Mode::PostIndex));
  __ ldrb(R3, Address(R4, -24, Address::Mode::NegOffset));
  __ ldrb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
  __ ldrb(R3, Address(R4, -24, Address::Mode::NegPostIndex));

  __ ldrh(R3, Address(R4, -24, Address::Mode::Offset));
  __ ldrh(R3, Address(R4, -24, Address::Mode::PreIndex));
  __ ldrh(R3, Address(R4, -24, Address::Mode::PostIndex));
  __ ldrh(R3, Address(R4, -24, Address::Mode::NegOffset));
  __ ldrh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
  __ ldrh(R3, Address(R4, -24, Address::Mode::NegPostIndex));

  __ ldrsb(R3, Address(R4, -24, Address::Mode::Offset));
  __ ldrsb(R3, Address(R4, -24, Address::Mode::PreIndex));
  __ ldrsb(R3, Address(R4, -24, Address::Mode::PostIndex));
  __ ldrsb(R3, Address(R4, -24, Address::Mode::NegOffset));
  __ ldrsb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
  __ ldrsb(R3, Address(R4, -24, Address::Mode::NegPostIndex));

  __ ldrsh(R3, Address(R4, -24, Address::Mode::Offset));
  __ ldrsh(R3, Address(R4, -24, Address::Mode::PreIndex));
  __ ldrsh(R3, Address(R4, -24, Address::Mode::PostIndex));
  __ ldrsh(R3, Address(R4, -24, Address::Mode::NegOffset));
  __ ldrsh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
  __ ldrsh(R3, Address(R4, -24, Address::Mode::NegPostIndex));

  __ str(R3, Address(R4, -24, Address::Mode::Offset));
  __ str(R3, Address(R4, -24, Address::Mode::PreIndex));
  __ str(R3, Address(R4, -24, Address::Mode::PostIndex));
  __ str(R3, Address(R4, -24, Address::Mode::NegOffset));
  __ str(R3, Address(R4, -24, Address::Mode::NegPreIndex));
  __ str(R3, Address(R4, -24, Address::Mode::NegPostIndex));

  __ strb(R3, Address(R4, -24, Address::Mode::Offset));
  __ strb(R3, Address(R4, -24, Address::Mode::PreIndex));
  __ strb(R3, Address(R4, -24, Address::Mode::PostIndex));
  __ strb(R3, Address(R4, -24, Address::Mode::NegOffset));
  __ strb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
  __ strb(R3, Address(R4, -24, Address::Mode::NegPostIndex));

  __ strh(R3, Address(R4, -24, Address::Mode::Offset));
  __ strh(R3, Address(R4, -24, Address::Mode::PreIndex));
  __ strh(R3, Address(R4, -24, Address::Mode::PostIndex));
  __ strh(R3, Address(R4, -24, Address::Mode::NegOffset));
  __ strh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
  __ strh(R3, Address(R4, -24, Address::Mode::NegPostIndex));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "NegativeLoadStore");
  delete assembler;
}

TEST(Thumb2AssemblerTest, SimpleLoadStoreDual) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ strd(R2, Address(R0, 24, Address::Mode::Offset));
  __ ldrd(R2, Address(R0, 24, Address::Mode::Offset));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "SimpleLoadStoreDual");
  delete assembler;
}

TEST(Thumb2AssemblerTest, ComplexLoadStoreDual) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ strd(R2, Address(R0, 24, Address::Mode::Offset));
  __ strd(R2, Address(R0, 24, Address::Mode::PreIndex));
  __ strd(R2, Address(R0, 24, Address::Mode::PostIndex));
  __ strd(R2, Address(R0, 24, Address::Mode::NegOffset));
  __ strd(R2, Address(R0, 24, Address::Mode::NegPreIndex));
  __ strd(R2, Address(R0, 24, Address::Mode::NegPostIndex));

  __ ldrd(R2, Address(R0, 24, Address::Mode::Offset));
  __ ldrd(R2, Address(R0, 24, Address::Mode::PreIndex));
  __ ldrd(R2, Address(R0, 24, Address::Mode::PostIndex));
  __ ldrd(R2, Address(R0, 24, Address::Mode::NegOffset));
  __ ldrd(R2, Address(R0, 24, Address::Mode::NegPreIndex));
  __ ldrd(R2, Address(R0, 24, Address::Mode::NegPostIndex));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "ComplexLoadStoreDual");
  delete assembler;
}

TEST(Thumb2AssemblerTest, NegativeLoadStoreDual) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ strd(R2, Address(R0, -24, Address::Mode::Offset));
  __ strd(R2, Address(R0, -24, Address::Mode::PreIndex));
  __ strd(R2, Address(R0, -24, Address::Mode::PostIndex));
  __ strd(R2, Address(R0, -24, Address::Mode::NegOffset));
  __ strd(R2, Address(R0, -24, Address::Mode::NegPreIndex));
  __ strd(R2, Address(R0, -24, Address::Mode::NegPostIndex));

  __ ldrd(R2, Address(R0, -24, Address::Mode::Offset));
  __ ldrd(R2, Address(R0, -24, Address::Mode::PreIndex));
  __ ldrd(R2, Address(R0, -24, Address::Mode::PostIndex));
  __ ldrd(R2, Address(R0, -24, Address::Mode::NegOffset));
  __ ldrd(R2, Address(R0, -24, Address::Mode::NegPreIndex));
  __ ldrd(R2, Address(R0, -24, Address::Mode::NegPostIndex));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "NegativeLoadStoreDual");
  delete assembler;
}

TEST(Thumb2AssemblerTest, SimpleBranch) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  Label l1;
  __ mov(R0, ShifterOperand(2));
  __ Bind(&l1);
  __ mov(R1, ShifterOperand(1));
  __ b(&l1);
  Label l2;
  __ b(&l2);
  __ mov(R1, ShifterOperand(2));
  __ Bind(&l2);
  __ mov(R0, ShifterOperand(3));

  Label l3;
  __ mov(R0, ShifterOperand(2));
  __ Bind(&l3);
  __ mov(R1, ShifterOperand(1));
  __ b(&l3, EQ);

  Label l4;
  __ b(&l4, EQ);
  __ mov(R1, ShifterOperand(2));
  __ Bind(&l4);
  __ mov(R0, ShifterOperand(3));

  // 2 linked labels.
  Label l5;
  __ b(&l5);
  __ mov(R1, ShifterOperand(4));
  __ b(&l5);
  __ mov(R1, ShifterOperand(5));
  __ Bind(&l5);
  __ mov(R0, ShifterOperand(6));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "SimpleBranch");
  delete assembler;
}

TEST(Thumb2AssemblerTest, LongBranch) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
  assembler->Force32Bit();
  // 32 bit branches.
  Label l1;
  __ mov(R0, ShifterOperand(2));
  __ Bind(&l1);
  __ mov(R1, ShifterOperand(1));
  __ b(&l1);

  Label l2;
  __ b(&l2);
  __ mov(R1, ShifterOperand(2));
  __ Bind(&l2);
  __ mov(R0, ShifterOperand(3));

  Label l3;
  __ mov(R0, ShifterOperand(2));
  __ Bind(&l3);
  __ mov(R1, ShifterOperand(1));
  __ b(&l3, EQ);

  Label l4;
  __ b(&l4, EQ);
  __ mov(R1, ShifterOperand(2));
  __ Bind(&l4);
  __ mov(R0, ShifterOperand(3));

  // 2 linked labels.
  Label l5;
  __ b(&l5);
  __ mov(R1, ShifterOperand(4));
  __ b(&l5);
  __ mov(R1, ShifterOperand(5));
  __ Bind(&l5);
  __ mov(R0, ShifterOperand(6));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "LongBranch");
  delete assembler;
}

TEST(Thumb2AssemblerTest, LoadMultiple) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  // 16 bit.
  __ ldm(DB_W, R4, (1 << R0 | 1 << R3));

  // 32 bit.
  __ ldm(DB_W, R4, (1 << LR | 1 << R11));
  __ ldm(DB, R4, (1 << LR | 1 << R11));

  // Single reg is converted to ldr.
  __ ldm(DB_W, R4, (1 << R5));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "LoadMultiple");
  delete assembler;
}

TEST(Thumb2AssemblerTest, StoreMultiple) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  // 16 bit.
  __ stm(IA_W, R4, (1 << R0 | 1 << R3));

  // 32 bit.
  __ stm(IA_W, R4, (1 << LR | 1 << R11));
  __ stm(IA, R4, (1 << LR | 1 << R11));

  // Single reg is converted to str.
  __ stm(IA_W, R4, (1 << R5));
  __ stm(IA, R4, (1 << R5));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "StoreMultiple");
  delete assembler;
}

TEST(Thumb2AssemblerTest, MovWMovT) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ movw(R4, 0);       // 16 bit.
  __ movw(R4, 0x34);    // 16 bit.
  __ movw(R9, 0x34);    // 32 bit due to high register.
  __ movw(R3, 0x1234);  // 32 bit due to large value.
  __ movw(R9, 0xffff);  // 32 bit due to large value and high register.

  // Always 32 bit.
  __ movt(R0, 0);
  __ movt(R0, 0x1234);
  __ movt(R1, 0xffff);

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "MovWMovT");
  delete assembler;
}

TEST(Thumb2AssemblerTest, SpecialAddSub) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ add(R2, SP, ShifterOperand(0x50));   // 16 bit.
  __ add(SP, SP, ShifterOperand(0x50));   // 16 bit.
  __ add(R8, SP, ShifterOperand(0x50));   // 32 bit.

  __ add(R2, SP, ShifterOperand(0xf00));  // 32 bit due to imm size.
  __ add(SP, SP, ShifterOperand(0xf00));  // 32 bit due to imm size.

  __ sub(SP, SP, ShifterOperand(0x50));   // 16 bit.
  __ sub(R0, SP, ShifterOperand(0x50));   // 32 bit.
  __ sub(R8, SP, ShifterOperand(0x50));   // 32 bit.

  __ sub(SP, SP, ShifterOperand(0xf00));  // 32 bit due to imm size.

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "SpecialAddSub");
  delete assembler;
}

TEST(Thumb2AssemblerTest, StoreToOffset) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ StoreToOffset(kStoreWord, R2, R4, 12);      // Simple.
  __ StoreToOffset(kStoreWord, R2, R4, 0x2000);  // Offset too big.

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "StoreToOffset");
  delete assembler;
}


TEST(Thumb2AssemblerTest, IfThen) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ it(EQ);
  __ mov(R1, ShifterOperand(1), EQ);

  __ it(EQ, kItThen);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), EQ);

  __ it(EQ, kItElse);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), NE);

  __ it(EQ, kItThen, kItElse);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), EQ);
  __ mov(R3, ShifterOperand(3), NE);

  __ it(EQ, kItElse, kItElse);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), NE);
  __ mov(R3, ShifterOperand(3), NE);

  __ it(EQ, kItThen, kItThen, kItElse);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), EQ);
  __ mov(R3, ShifterOperand(3), EQ);
  __ mov(R4, ShifterOperand(4), NE);

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "IfThen");
  delete assembler;
}

TEST(Thumb2AssemblerTest, CbzCbnz) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  Label l1;
  __ cbz(R2, &l1);
  __ mov(R1, ShifterOperand(3));
  __ mov(R2, ShifterOperand(3));
  __ Bind(&l1);
  __ mov(R2, ShifterOperand(4));

  Label l2;
  __ cbnz(R2, &l2);
  __ mov(R8, ShifterOperand(3));
  __ mov(R2, ShifterOperand(3));
  __ Bind(&l2);
  __ mov(R2, ShifterOperand(4));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "CbzCbnz");
  delete assembler;
}

TEST(Thumb2AssemblerTest, Multiply) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ mul(R0, R1, R0);
  __ mul(R0, R1, R2);
  __ mul(R8, R9, R8);
  __ mul(R8, R9, R10);

  __ mla(R0, R1, R2, R3);
  __ mla(R8, R9, R8, R9);

  __ mls(R0, R1, R2, R3);
  __ mls(R8, R9, R8, R9);

  __ umull(R0, R1, R2, R3);
  __ umull(R8, R9, R10, R11);

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "Multiply");
  delete assembler;
}

TEST(Thumb2AssemblerTest, Divide) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ sdiv(R0, R1, R2);
  __ sdiv(R8, R9, R10);

  __ udiv(R0, R1, R2);
  __ udiv(R8, R9, R10);

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "Divide");
  delete assembler;
}

TEST(Thumb2AssemblerTest, VMov) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ vmovs(S1, 1.0);
  __ vmovd(D1, 1.0);

  __ vmovs(S1, S2);
  __ vmovd(D1, D2);

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "VMov");
  delete assembler;
}


TEST(Thumb2AssemblerTest, BasicFloatingPoint) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ vadds(S0, S1, S2);
  __ vsubs(S0, S1, S2);
  __ vmuls(S0, S1, S2);
  __ vmlas(S0, S1, S2);
  __ vmlss(S0, S1, S2);
  __ vdivs(S0, S1, S2);
  __ vabss(S0, S1);
  __ vnegs(S0, S1);
  __ vsqrts(S0, S1);

  __ vaddd(D0, D1, D2);
  __ vsubd(D0, D1, D2);
  __ vmuld(D0, D1, D2);
  __ vmlad(D0, D1, D2);
  __ vmlsd(D0, D1, D2);
  __ vdivd(D0, D1, D2);
  __ vabsd(D0, D1);
  __ vnegd(D0, D1);
  __ vsqrtd(D0, D1);

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "BasicFloatingPoint");
  delete assembler;
}

TEST(Thumb2AssemblerTest, FloatingPointConversions) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ vcvtsd(S2, D2);
  __ vcvtds(D2, S2);

  __ vcvtis(S1, S2);
  __ vcvtsi(S1, S2);

  __ vcvtid(S1, D2);
  __ vcvtdi(D1, S2);

  __ vcvtus(S1, S2);
  __ vcvtsu(S1, S2);

  __ vcvtud(S1, D2);
  __ vcvtdu(D1, S2);

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "FloatingPointConversions");
  delete assembler;
}

TEST(Thumb2AssemblerTest, FloatingPointComparisons) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ vcmps(S0, S1);
  __ vcmpd(D0, D1);

  __ vcmpsz(S2);
  __ vcmpdz(D2);

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "FloatingPointComparisons");
  delete assembler;
}

TEST(Thumb2AssemblerTest, Calls) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ blx(LR);
  __ bx(LR);

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "Calls");
  delete assembler;
}

TEST(Thumb2AssemblerTest, Breakpoint) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ bkpt(0);

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "Breakpoint");
  delete assembler;
}

TEST(Thumb2AssemblerTest, StrR1) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ str(R1, Address(SP, 68));
  __ str(R1, Address(SP, 1068));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "StrR1");
  delete assembler;
}

TEST(Thumb2AssemblerTest, VPushPop) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ vpushs(S2, 4);
  __ vpushd(D2, 4);

  __ vpops(S2, 4);
  __ vpopd(D2, 4);

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "VPushPop");
  delete assembler;
}

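// A 16-bit unconditional Thumb branch encodes an 11-bit signed offset, giving
// it a range of roughly +/-2KB. Each mov in the loop below assembles to 16
// bits, so the loop emits about 2KB of code and the forward branch sits right
// at that limit; Branch32 adds one more mov to force the 32-bit encoding.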
1136TEST(Thumb2AssemblerTest, Max16BitBranch) {
1137 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
1138
1139 Label l1;
1140 __ b(&l1);
1141 for (int i = 0 ; i < (1 << 11) ; i += 2) {
1142 __ mov(R3, ShifterOperand(i & 0xff));
1143 }
1144 __ Bind(&l1);
1145 __ mov(R1, ShifterOperand(R2));
1146
1147 size_t cs = __ CodeSize();
1148 std::vector<uint8_t> managed_code(cs);
1149 MemoryRegion code(&managed_code[0], managed_code.size());
1150 __ FinalizeInstructions(code);
1151 dump(managed_code, "Max16BitBranch");
1152 delete assembler;
1153}
1154
1155TEST(Thumb2AssemblerTest, Branch32) {
1156 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
1157
1158 Label l1;
1159 __ b(&l1);
1160 for (int i = 0 ; i < (1 << 11) + 2 ; i += 2) {
1161 __ mov(R3, ShifterOperand(i & 0xff));
1162 }
1163 __ Bind(&l1);
1164 __ mov(R1, ShifterOperand(R2));
1165
1166 size_t cs = __ CodeSize();
1167 std::vector<uint8_t> managed_code(cs);
1168 MemoryRegion code(&managed_code[0], managed_code.size());
1169 __ FinalizeInstructions(code);
1170 dump(managed_code, "Branch32");
1171 delete assembler;
1172}
1173
1174TEST(Thumb2AssemblerTest, CompareAndBranchMax) {
1175 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
1176
1177 Label l1;
1178 __ cbz(R4, &l1);
1179 for (int i = 0 ; i < (1 << 7) ; i += 2) {
1180 __ mov(R3, ShifterOperand(i & 0xff));
1181 }
1182 __ Bind(&l1);
1183 __ mov(R1, ShifterOperand(R2));
1184
1185 size_t cs = __ CodeSize();
1186 std::vector<uint8_t> managed_code(cs);
1187 MemoryRegion code(&managed_code[0], managed_code.size());
1188 __ FinalizeInstructions(code);
1189 dump(managed_code, "CompareAndBranchMax");
1190 delete assembler;
1191}
1192
1193TEST(Thumb2AssemblerTest, CompareAndBranchRelocation16) {
1194 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
1195
1196 Label l1;
1197 __ cbz(R4, &l1);
1198 for (int i = 0 ; i < (1 << 7) + 2 ; i += 2) {
1199 __ mov(R3, ShifterOperand(i & 0xff));
1200 }
1201 __ Bind(&l1);
1202 __ mov(R1, ShifterOperand(R2));
1203
1204 size_t cs = __ CodeSize();
1205 std::vector<uint8_t> managed_code(cs);
1206 MemoryRegion code(&managed_code[0], managed_code.size());
1207 __ FinalizeInstructions(code);
1208 dump(managed_code, "CompareAndBranchRelocation16");
1209 delete assembler;
1210}
1211
1212TEST(Thumb2AssemblerTest, CompareAndBranchRelocation32) {
1213 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
1214
1215 Label l1;
1216 __ cbz(R4, &l1);
1217 for (int i = 0 ; i < (1 << 11) + 2 ; i += 2) {
1218 __ mov(R3, ShifterOperand(i & 0xff));
1219 }
1220 __ Bind(&l1);
1221 __ mov(R1, ShifterOperand(R2));
1222
1223 size_t cs = __ CodeSize();
1224 std::vector<uint8_t> managed_code(cs);
1225 MemoryRegion code(&managed_code[0], managed_code.size());
1226 __ FinalizeInstructions(code);
1227 dump(managed_code, "CompareAndBranchRelocation32");
1228 delete assembler;
1229}
1230
1231TEST(Thumb2AssemblerTest, MixedBranch32) {
1232 arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
1233
1234 Label l1;
1235 Label l2;
1236 __ b(&l1); // Forwards.
1237 __ Bind(&l2);
1238
1239 // Space to force relocation.
1240 for (int i = 0 ; i < (1 << 11) + 2 ; i += 2) {
1241 __ mov(R3, ShifterOperand(i & 0xff));
1242 }
1243 __ b(&l2); // Backwards.
1244 __ Bind(&l1);
1245 __ mov(R1, ShifterOperand(R2));
1246
1247 size_t cs = __ CodeSize();
1248 std::vector<uint8_t> managed_code(cs);
1249 MemoryRegion code(&managed_code[0], managed_code.size());
1250 __ FinalizeInstructions(code);
1251 dump(managed_code, "MixedBranch32");
1252 delete assembler;
1253}
1254
TEST(Thumb2AssemblerTest, Shifts) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  // 16 bit.
  __ Lsl(R0, R1, 5);
  __ Lsr(R0, R1, 5);
  __ Asr(R0, R1, 5);

  __ Lsl(R0, R0, R1);
  __ Lsr(R0, R0, R1);
  __ Asr(R0, R0, R1);

  // 32 bit due to high registers.
  __ Lsl(R8, R1, 5);
  __ Lsr(R0, R8, 5);
  __ Asr(R8, R1, 5);
  __ Ror(R0, R8, 5);

  // 32 bit due to different Rd and Rn.
  __ Lsl(R0, R1, R2);
  __ Lsr(R0, R1, R2);
  __ Asr(R0, R1, R2);
  __ Ror(R0, R1, R2);

  // 32 bit due to use of high registers.
  __ Lsl(R8, R1, R2);
  __ Lsr(R0, R8, R2);
  __ Asr(R0, R1, R8);

  // S bit (all 32 bit).

  // 32 bit due to high registers.
  __ Lsl(R8, R1, 5, true);
  __ Lsr(R0, R8, 5, true);
  __ Asr(R8, R1, 5, true);
  __ Ror(R0, R8, 5, true);

  // 32 bit due to different Rd and Rn.
  __ Lsl(R0, R1, R2, true);
  __ Lsr(R0, R1, R2, true);
  __ Asr(R0, R1, R2, true);
  __ Ror(R0, R1, R2, true);

  // 32 bit due to use of high registers.
  __ Lsl(R8, R1, R2, true);
  __ Lsr(R0, R8, R2, true);
  __ Asr(R0, R1, R8, true);

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "Shifts");
  delete assembler;
}

TEST(Thumb2AssemblerTest, LoadStoreRegOffset) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  // 16 bit.
  __ ldr(R0, Address(R1, R2));
  __ str(R0, Address(R1, R2));

  // 32 bit due to shift.
  __ ldr(R0, Address(R1, R2, LSL, 1));
  __ str(R0, Address(R1, R2, LSL, 1));

  __ ldr(R0, Address(R1, R2, LSL, 3));
  __ str(R0, Address(R1, R2, LSL, 3));

  // 32 bit due to high register use.
  __ ldr(R8, Address(R1, R2));
  __ str(R8, Address(R1, R2));

  __ ldr(R1, Address(R8, R2));
  __ str(R2, Address(R8, R2));

  __ ldr(R0, Address(R1, R8));
  __ str(R0, Address(R1, R8));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "LoadStoreRegOffset");
  delete assembler;
}

TEST(Thumb2AssemblerTest, LoadStoreLiteral) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ ldr(R0, Address(4));
  __ str(R0, Address(4));

  __ ldr(R0, Address(-8));
  __ str(R0, Address(-8));

  // Limits.
  __ ldr(R0, Address(0x3ff));  // 10 bits (16 bit).
  __ ldr(R0, Address(0x7ff));  // 11 bits (32 bit).
  __ str(R0, Address(0x3ff));  // 32 bit (no 16 bit str(literal)).
  __ str(R0, Address(0x7ff));  // 11 bits (32 bit).

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "LoadStoreLiteral");
  delete assembler;
}

TEST(Thumb2AssemblerTest, LoadStoreLimits) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ ldr(R0, Address(R4, 124));   // 16 bit.
  __ ldr(R0, Address(R4, 128));   // 32 bit.

  __ ldrb(R0, Address(R4, 31));   // 16 bit.
  __ ldrb(R0, Address(R4, 32));   // 32 bit.

  __ ldrh(R0, Address(R4, 62));   // 16 bit.
  __ ldrh(R0, Address(R4, 64));   // 32 bit.

  __ ldrsb(R0, Address(R4, 31));  // 32 bit.
  __ ldrsb(R0, Address(R4, 32));  // 32 bit.

  __ ldrsh(R0, Address(R4, 62));  // 32 bit.
  __ ldrsh(R0, Address(R4, 64));  // 32 bit.

  __ str(R0, Address(R4, 124));   // 16 bit.
  __ str(R0, Address(R4, 128));   // 32 bit.

  __ strb(R0, Address(R4, 31));   // 16 bit.
  __ strb(R0, Address(R4, 32));   // 32 bit.

  __ strh(R0, Address(R4, 62));   // 16 bit.
  __ strh(R0, Address(R4, 64));   // 32 bit.

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "LoadStoreLimits");
  delete assembler;
}

#undef __
}  // namespace arm
}  // namespace art