/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <dirent.h>
#include <errno.h>
#include <fstream>
#include <map>
#include <string.h>
#include <sys/types.h>

#include "gtest/gtest.h"
#include "utils/arm/assembler_thumb2.h"
#include "base/hex_dump.h"
#include "common_runtime_test.h"

namespace art {
namespace arm {

// Include results file (generated manually).
#include "assembler_thumb_test_expected.cc.inc"
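
// A minimal sketch of what the .cc.inc file included above is assumed to
// provide: a nullptr-terminated array of expected disassembly lines per test,
// plus the map and setup function used by InitResults() below. The array name
// and disassembly text here are illustrative, not copied from the real file:
//
//   const char* const SimpleMovResults[] = {
//     "   0:   0008        movs    r0, r1\n",
//     ...
//     nullptr
//   };
//   std::map<std::string, const char* const*> test_results;
//   void setup_results() {
//     test_results["SimpleMov"] = SimpleMovResults;
//     ...
//   }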

#ifndef ART_TARGET_ANDROID
// This controls whether the test results are printed to the screen or
// compared against the expected output in the .cc.inc file.
//
// To generate new expected output, set this to true, run the tests, and copy
// the printed output into the .cc.inc file in the same form as the existing
// results. When this is false, the results are not printed but are compared
// against the expected results in the .cc.inc file.
static constexpr bool kPrintResults = false;
#endif

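// Ensure ANDROID_DATA names a usable directory; the test infrastructure
// requires it, so default it to /tmp on the host if the environment does not
// already set it.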
void SetAndroidData() {
  const char* data = getenv("ANDROID_DATA");
  if (data == nullptr) {
    setenv("ANDROID_DATA", "/tmp", 1);
  }
}

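// Compare two strings while ignoring all whitespace on either side.
// Returns 0 if they match, non-zero otherwise.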
int CompareIgnoringSpace(const char* s1, const char* s2) {
  while (*s1 != '\0') {
    while (isspace(*s1)) ++s1;
    while (isspace(*s2)) ++s2;
    if (*s1 == '\0' || *s1 != *s2) {
      break;
    }
    ++s1;
    ++s2;
  }
  return *s1 - *s2;
}

void InitResults() {
  if (test_results.empty()) {
    setup_results();
  }
}

std::string GetToolsDir() {
#ifndef ART_TARGET_ANDROID
  // This will only work on the host. There is no as, objcopy or objdump on the device.
  static std::string toolsdir;

  if (toolsdir.empty()) {
    setup_results();
    toolsdir = CommonRuntimeTest::GetAndroidTargetToolsDir(kThumb2);
    SetAndroidData();
  }

  return toolsdir;
#else
  return std::string();
#endif
}

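// Write the assembled code bytes out as a .S file, run it through the target
// toolchain (as, objcopy, objdump), and either print the disassembly (when
// kPrintResults is set) or compare it line by line against `results`.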
void DumpAndCheck(std::vector<uint8_t>& code, const char* testname, const char* const* results) {
#ifndef ART_TARGET_ANDROID
  static std::string toolsdir = GetToolsDir();

  ScratchFile file;

  const char* filename = file.GetFilename().c_str();

  std::ofstream out(filename);
  if (out) {
    out << ".section \".text\"\n";
    out << ".syntax unified\n";
    out << ".arch armv7-a\n";
    out << ".thumb\n";
    out << ".thumb_func\n";
    out << ".type " << testname << ", #function\n";
    out << ".global " << testname << "\n";
    out << testname << ":\n";
    out << ".fnstart\n";

    for (uint32_t i = 0; i < code.size(); ++i) {
      out << ".byte " << (static_cast<int>(code[i]) & 0xff) << "\n";
    }
    out << ".fnend\n";
    out << ".size " << testname << ", .-" << testname << "\n";
  }
  out.close();

  char cmd[1024];

  // Assemble the .S file.
  snprintf(cmd, sizeof(cmd), "%sas %s -o %s.o", toolsdir.c_str(), filename, filename);
  int cmd_result = system(cmd);
  ASSERT_EQ(cmd_result, 0) << strerror(errno);

  // Remove the $d mapping symbols so the disassembler doesn't dump the
  // instruction stream as .word directives.
  snprintf(cmd, sizeof(cmd), "%sobjcopy -N '$d' %s.o %s.oo", toolsdir.c_str(), filename, filename);
  int cmd_result2 = system(cmd);
  ASSERT_EQ(cmd_result2, 0) << strerror(errno);

  // Disassemble.

  snprintf(cmd, sizeof(cmd), "%sobjdump -d %s.oo | grep '^ *[0-9a-f][0-9a-f]*:'",
           toolsdir.c_str(), filename);
  if (kPrintResults) {
    // Print the results only, don't check. This is used to generate new output for inserting
    // into the .inc file, so let's add the appropriate prefix/suffix needed in the C++ code.
    strcat(cmd, " | sed '-es/^/  \"/' | sed '-es/$/\\\\n\",/'");
    int cmd_result3 = system(cmd);
    ASSERT_EQ(cmd_result3, 0) << strerror(errno);
  } else {
    // Check the results match the appropriate results in the .inc file.
    FILE *fp = popen(cmd, "r");
    ASSERT_TRUE(fp != nullptr);

    uint32_t lineindex = 0;

    while (!feof(fp)) {
      char testline[256];
      char *s = fgets(testline, sizeof(testline), fp);
      if (s == nullptr) {
        break;
      }
      ASSERT_TRUE(results[lineindex] != nullptr);  // Fail if there are more lines than expected.
      if (CompareIgnoringSpace(results[lineindex], testline) != 0) {
        LOG(FATAL) << "Output is not as expected at line: " << lineindex
                   << ": " << results[lineindex] << "/" << testline;
      }
      ++lineindex;
    }
    // Check that we are at the end.
    ASSERT_TRUE(results[lineindex] == nullptr);
    pclose(fp);
  }

  char buf[FILENAME_MAX];
  snprintf(buf, sizeof(buf), "%s.o", filename);
  unlink(buf);

  snprintf(buf, sizeof(buf), "%s.oo", filename);
  unlink(buf);
#endif  // ART_TARGET_ANDROID
}

#define __ assembler->

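// Finalize the code generated by the assembler into a byte buffer and run it
// through DumpAndCheck against the given expected results.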
void EmitAndCheck(arm::Thumb2Assembler* assembler, const char* testname,
                  const char* const* results) {
  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);

  DumpAndCheck(managed_code, testname, results);
}

void EmitAndCheck(arm::Thumb2Assembler* assembler, const char* testname) {
  InitResults();
  std::map<std::string, const char* const*>::iterator results = test_results.find(testname);
  ASSERT_NE(results, test_results.end());

  EmitAndCheck(assembler, testname, results->second);
}

#undef __

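// Test fixture providing a fresh arena-backed Thumb2 assembler for each test
// case.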
class Thumb2AssemblerTest : public ::testing::Test {
 public:
  Thumb2AssemblerTest() : pool(), arena(&pool), assembler(&arena) { }

  ArenaPool pool;
  ArenaAllocator arena;
  arm::Thumb2Assembler assembler;
};

#define __ assembler.

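// Each test below emits instructions and checks their disassembly against the
// entry of the same name in test_results. A new test would follow the same
// pattern (the test name and its matching .inc entry here are hypothetical):
//
//   TEST_F(Thumb2AssemblerTest, MyNewTest) {
//     __ mov(R0, ShifterOperand(R1));
//     EmitAndCheck(&assembler, "MyNewTest");
//   }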
TEST_F(Thumb2AssemblerTest, SimpleMov) {
  __ movs(R0, ShifterOperand(R1));
  __ mov(R0, ShifterOperand(R1));
  __ mov(R8, ShifterOperand(R9));

  __ mov(R0, ShifterOperand(1));
  __ mov(R8, ShifterOperand(9));

  EmitAndCheck(&assembler, "SimpleMov");
}

TEST_F(Thumb2AssemblerTest, SimpleMov32) {
  __ Force32Bit();

  __ mov(R0, ShifterOperand(R1));
  __ mov(R8, ShifterOperand(R9));

  EmitAndCheck(&assembler, "SimpleMov32");
}

TEST_F(Thumb2AssemblerTest, SimpleMovAdd) {
  __ mov(R0, ShifterOperand(R1));
  __ adds(R0, R1, ShifterOperand(R2));
  __ add(R0, R1, ShifterOperand(0));

  EmitAndCheck(&assembler, "SimpleMovAdd");
}

TEST_F(Thumb2AssemblerTest, DataProcessingRegister) {
  // 32 bit variants using low registers.
  __ mvn(R0, ShifterOperand(R1), AL, kCcKeep);
  __ add(R0, R1, ShifterOperand(R2), AL, kCcKeep);
  __ sub(R0, R1, ShifterOperand(R2), AL, kCcKeep);
  __ and_(R0, R1, ShifterOperand(R2), AL, kCcKeep);
  __ orr(R0, R1, ShifterOperand(R2), AL, kCcKeep);
  __ orn(R0, R1, ShifterOperand(R2), AL, kCcKeep);
  __ eor(R0, R1, ShifterOperand(R2), AL, kCcKeep);
  __ bic(R0, R1, ShifterOperand(R2), AL, kCcKeep);
  __ adc(R0, R1, ShifterOperand(R2), AL, kCcKeep);
  __ sbc(R0, R1, ShifterOperand(R2), AL, kCcKeep);
  __ rsb(R0, R1, ShifterOperand(R2), AL, kCcKeep);
  __ teq(R0, ShifterOperand(R1));

  // 16 bit variants using low registers.
  __ movs(R0, ShifterOperand(R1));
  __ mov(R0, ShifterOperand(R1), AL, kCcKeep);
  __ mvns(R0, ShifterOperand(R1));
  __ add(R0, R0, ShifterOperand(R1), AL, kCcKeep);
  __ adds(R0, R1, ShifterOperand(R2));
  __ subs(R0, R1, ShifterOperand(R2));
  __ adcs(R0, R0, ShifterOperand(R1));
  __ sbcs(R0, R0, ShifterOperand(R1));
  __ ands(R0, R0, ShifterOperand(R1));
  __ orrs(R0, R0, ShifterOperand(R1));
  __ eors(R0, R0, ShifterOperand(R1));
  __ bics(R0, R0, ShifterOperand(R1));
  __ tst(R0, ShifterOperand(R1));
  __ cmp(R0, ShifterOperand(R1));
  __ cmn(R0, ShifterOperand(R1));

  // 16-bit variants using high registers.
  __ mov(R1, ShifterOperand(R8), AL, kCcKeep);
  __ mov(R9, ShifterOperand(R0), AL, kCcKeep);
  __ mov(R8, ShifterOperand(R9), AL, kCcKeep);
  __ add(R1, R1, ShifterOperand(R8), AL, kCcKeep);
  __ add(R9, R9, ShifterOperand(R0), AL, kCcKeep);
  __ add(R8, R8, ShifterOperand(R9), AL, kCcKeep);
  __ cmp(R0, ShifterOperand(R9));
  __ cmp(R8, ShifterOperand(R1));
  __ cmp(R9, ShifterOperand(R8));

  // The 16-bit RSBS Rd, Rn, #0, also known as NEGS Rd, Rn, is specified using
  // an immediate (0) but emitted without one, so we test it here.
  __ rsbs(R0, R1, ShifterOperand(0));
  __ rsbs(R0, R0, ShifterOperand(0));  // Check Rd == Rn code path.

  // 32 bit variants using high registers that would be 16-bit if using low registers.
  __ movs(R0, ShifterOperand(R8));
  __ mvns(R0, ShifterOperand(R8));
  __ add(R0, R1, ShifterOperand(R8), AL, kCcKeep);
  __ adds(R0, R1, ShifterOperand(R8));
  __ subs(R0, R1, ShifterOperand(R8));
  __ adcs(R0, R0, ShifterOperand(R8));
  __ sbcs(R0, R0, ShifterOperand(R8));
  __ ands(R0, R0, ShifterOperand(R8));
  __ orrs(R0, R0, ShifterOperand(R8));
  __ eors(R0, R0, ShifterOperand(R8));
  __ bics(R0, R0, ShifterOperand(R8));
  __ tst(R0, ShifterOperand(R8));
  __ cmn(R0, ShifterOperand(R8));
  __ rsbs(R0, R8, ShifterOperand(0));  // Check that this is not emitted as 16-bit.
  __ rsbs(R8, R8, ShifterOperand(0));  // Check that this is not emitted as 16-bit (Rd == Rn).

  // 32-bit variants of instructions that would be 16-bit outside IT block.
  __ it(arm::EQ);
  __ mvns(R0, ShifterOperand(R1), arm::EQ);
  __ it(arm::EQ);
  __ adds(R0, R1, ShifterOperand(R2), arm::EQ);
  __ it(arm::EQ);
  __ subs(R0, R1, ShifterOperand(R2), arm::EQ);
  __ it(arm::EQ);
  __ adcs(R0, R0, ShifterOperand(R1), arm::EQ);
  __ it(arm::EQ);
  __ sbcs(R0, R0, ShifterOperand(R1), arm::EQ);
  __ it(arm::EQ);
  __ ands(R0, R0, ShifterOperand(R1), arm::EQ);
  __ it(arm::EQ);
  __ orrs(R0, R0, ShifterOperand(R1), arm::EQ);
  __ it(arm::EQ);
  __ eors(R0, R0, ShifterOperand(R1), arm::EQ);
  __ it(arm::EQ);
  __ bics(R0, R0, ShifterOperand(R1), arm::EQ);

  // 16-bit variants of instructions that would be 32-bit outside IT block.
  __ it(arm::EQ);
  __ mvn(R0, ShifterOperand(R1), arm::EQ, kCcKeep);
  __ it(arm::EQ);
  __ add(R0, R1, ShifterOperand(R2), arm::EQ, kCcKeep);
  __ it(arm::EQ);
  __ sub(R0, R1, ShifterOperand(R2), arm::EQ, kCcKeep);
  __ it(arm::EQ);
  __ adc(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
  __ it(arm::EQ);
  __ sbc(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
  __ it(arm::EQ);
  __ and_(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
  __ it(arm::EQ);
  __ orr(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
  __ it(arm::EQ);
  __ eor(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
  __ it(arm::EQ);
  __ bic(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);

  // 16 bit variants selected for the default kCcDontCare.
  __ mov(R0, ShifterOperand(R1));
  __ mvn(R0, ShifterOperand(R1));
  __ add(R0, R0, ShifterOperand(R1));
  __ add(R0, R1, ShifterOperand(R2));
  __ sub(R0, R1, ShifterOperand(R2));
  __ adc(R0, R0, ShifterOperand(R1));
  __ sbc(R0, R0, ShifterOperand(R1));
  __ and_(R0, R0, ShifterOperand(R1));
  __ orr(R0, R0, ShifterOperand(R1));
  __ eor(R0, R0, ShifterOperand(R1));
  __ bic(R0, R0, ShifterOperand(R1));
  __ mov(R1, ShifterOperand(R8));
  __ mov(R9, ShifterOperand(R0));
  __ mov(R8, ShifterOperand(R9));
  __ add(R1, R1, ShifterOperand(R8));
  __ add(R9, R9, ShifterOperand(R0));
  __ add(R8, R8, ShifterOperand(R9));
  __ rsb(R0, R1, ShifterOperand(0));
  __ rsb(R0, R0, ShifterOperand(0));

  // And an arbitrary 32-bit instruction using IP.
  __ add(R12, R1, ShifterOperand(R0), AL, kCcKeep);

  EmitAndCheck(&assembler, "DataProcessingRegister");
}

TEST_F(Thumb2AssemblerTest, DataProcessingImmediate) {
  __ mov(R0, ShifterOperand(0x55));
  __ mvn(R0, ShifterOperand(0x55));
  __ add(R0, R1, ShifterOperand(0x55));
  __ sub(R0, R1, ShifterOperand(0x55));
  __ and_(R0, R1, ShifterOperand(0x55));
  __ orr(R0, R1, ShifterOperand(0x55));
  __ orn(R0, R1, ShifterOperand(0x55));
  __ eor(R0, R1, ShifterOperand(0x55));
  __ bic(R0, R1, ShifterOperand(0x55));
  __ adc(R0, R1, ShifterOperand(0x55));
  __ sbc(R0, R1, ShifterOperand(0x55));
  __ rsb(R0, R1, ShifterOperand(0x55));

  __ tst(R0, ShifterOperand(0x55));
  __ teq(R0, ShifterOperand(0x55));
  __ cmp(R0, ShifterOperand(0x55));
  __ cmn(R0, ShifterOperand(0x55));

  __ add(R0, R1, ShifterOperand(5));
  __ sub(R0, R1, ShifterOperand(5));

  __ movs(R0, ShifterOperand(0x55));
  __ mvns(R0, ShifterOperand(0x55));

  __ adds(R0, R1, ShifterOperand(5));
  __ subs(R0, R1, ShifterOperand(5));

  EmitAndCheck(&assembler, "DataProcessingImmediate");
}

TEST_F(Thumb2AssemblerTest, DataProcessingModifiedImmediate) {
  __ mov(R0, ShifterOperand(0x550055));
  __ mvn(R0, ShifterOperand(0x550055));
  __ add(R0, R1, ShifterOperand(0x550055));
  __ sub(R0, R1, ShifterOperand(0x550055));
  __ and_(R0, R1, ShifterOperand(0x550055));
  __ orr(R0, R1, ShifterOperand(0x550055));
  __ orn(R0, R1, ShifterOperand(0x550055));
  __ eor(R0, R1, ShifterOperand(0x550055));
  __ bic(R0, R1, ShifterOperand(0x550055));
  __ adc(R0, R1, ShifterOperand(0x550055));
  __ sbc(R0, R1, ShifterOperand(0x550055));
  __ rsb(R0, R1, ShifterOperand(0x550055));

  __ tst(R0, ShifterOperand(0x550055));
  __ teq(R0, ShifterOperand(0x550055));
  __ cmp(R0, ShifterOperand(0x550055));
  __ cmn(R0, ShifterOperand(0x550055));

  EmitAndCheck(&assembler, "DataProcessingModifiedImmediate");
}

Dave Allison65fcc2c2014-04-28 13:45:27 -0700424 __ mov(R0, ShifterOperand(0x550055));
425 __ mov(R0, ShifterOperand(0x55005500));
426 __ mov(R0, ShifterOperand(0x55555555));
427 __ mov(R0, ShifterOperand(0xd5000000)); // rotated to first position
428 __ mov(R0, ShifterOperand(0x6a000000)); // rotated to second position
429 __ mov(R0, ShifterOperand(0x350)); // rotated to 2nd last position
430 __ mov(R0, ShifterOperand(0x1a8)); // rotated to last position
431
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000432 EmitAndCheck(&assembler, "DataProcessingModifiedImmediates");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700433}
434
Vladimir Marko93205e32016-04-13 11:59:46 +0100435TEST_F(Thumb2AssemblerTest, DataProcessingShiftedRegister) {
Vladimir Marko73cf0fb2015-07-30 15:07:22 +0100436 // 16-bit variants.
437 __ movs(R3, ShifterOperand(R4, LSL, 4));
438 __ movs(R3, ShifterOperand(R4, LSR, 5));
439 __ movs(R3, ShifterOperand(R4, ASR, 6));
Dave Allison65fcc2c2014-04-28 13:45:27 -0700440
Vladimir Marko73cf0fb2015-07-30 15:07:22 +0100441 // 32-bit ROR because ROR immediate doesn't have the same 16-bit version as other shifts.
442 __ movs(R3, ShifterOperand(R4, ROR, 7));
443
444 // 32-bit RRX because RRX has no 16-bit version.
445 __ movs(R3, ShifterOperand(R4, RRX));
446
447 // 32 bit variants (not setting condition codes).
448 __ mov(R3, ShifterOperand(R4, LSL, 4), AL, kCcKeep);
449 __ mov(R3, ShifterOperand(R4, LSR, 5), AL, kCcKeep);
450 __ mov(R3, ShifterOperand(R4, ASR, 6), AL, kCcKeep);
451 __ mov(R3, ShifterOperand(R4, ROR, 7), AL, kCcKeep);
452 __ mov(R3, ShifterOperand(R4, RRX), AL, kCcKeep);
453
454 // 32 bit variants (high registers).
455 __ movs(R8, ShifterOperand(R4, LSL, 4));
456 __ movs(R8, ShifterOperand(R4, LSR, 5));
457 __ movs(R8, ShifterOperand(R4, ASR, 6));
458 __ movs(R8, ShifterOperand(R4, ROR, 7));
459 __ movs(R8, ShifterOperand(R4, RRX));
Dave Allison65fcc2c2014-04-28 13:45:27 -0700460
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000461 EmitAndCheck(&assembler, "DataProcessingShiftedRegister");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700462}
463
Vladimir Marko93205e32016-04-13 11:59:46 +0100464TEST_F(Thumb2AssemblerTest, ShiftImmediate) {
Vladimir Markof9d741e2015-11-20 15:08:11 +0000465 // Note: This test produces the same results as DataProcessingShiftedRegister
466 // but it does so using shift functions instead of mov().
Vladimir Markof9d741e2015-11-20 15:08:11 +0000467
468 // 16-bit variants.
469 __ Lsl(R3, R4, 4);
470 __ Lsr(R3, R4, 5);
471 __ Asr(R3, R4, 6);
472
473 // 32-bit ROR because ROR immediate doesn't have the same 16-bit version as other shifts.
474 __ Ror(R3, R4, 7);
475
476 // 32-bit RRX because RRX has no 16-bit version.
477 __ Rrx(R3, R4);
478
479 // 32 bit variants (not setting condition codes).
480 __ Lsl(R3, R4, 4, AL, kCcKeep);
481 __ Lsr(R3, R4, 5, AL, kCcKeep);
482 __ Asr(R3, R4, 6, AL, kCcKeep);
483 __ Ror(R3, R4, 7, AL, kCcKeep);
484 __ Rrx(R3, R4, AL, kCcKeep);
485
486 // 32 bit variants (high registers).
487 __ Lsls(R8, R4, 4);
488 __ Lsrs(R8, R4, 5);
489 __ Asrs(R8, R4, 6);
490 __ Rors(R8, R4, 7);
491 __ Rrxs(R8, R4);
492
493 EmitAndCheck(&assembler, "ShiftImmediate");
494}
Dave Allison65fcc2c2014-04-28 13:45:27 -0700495
Vladimir Marko93205e32016-04-13 11:59:46 +0100496TEST_F(Thumb2AssemblerTest, BasicLoad) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700497 __ ldr(R3, Address(R4, 24));
498 __ ldrb(R3, Address(R4, 24));
499 __ ldrh(R3, Address(R4, 24));
500 __ ldrsb(R3, Address(R4, 24));
501 __ ldrsh(R3, Address(R4, 24));
502
503 __ ldr(R3, Address(SP, 24));
504
505 // 32 bit variants
506 __ ldr(R8, Address(R4, 24));
507 __ ldrb(R8, Address(R4, 24));
508 __ ldrh(R8, Address(R4, 24));
509 __ ldrsb(R8, Address(R4, 24));
510 __ ldrsh(R8, Address(R4, 24));
511
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000512 EmitAndCheck(&assembler, "BasicLoad");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700513}
514
515
Vladimir Marko93205e32016-04-13 11:59:46 +0100516TEST_F(Thumb2AssemblerTest, BasicStore) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700517 __ str(R3, Address(R4, 24));
518 __ strb(R3, Address(R4, 24));
519 __ strh(R3, Address(R4, 24));
520
521 __ str(R3, Address(SP, 24));
522
523 // 32 bit variants.
524 __ str(R8, Address(R4, 24));
525 __ strb(R8, Address(R4, 24));
526 __ strh(R8, Address(R4, 24));
527
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000528 EmitAndCheck(&assembler, "BasicStore");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700529}
530
Vladimir Marko93205e32016-04-13 11:59:46 +0100531TEST_F(Thumb2AssemblerTest, ComplexLoad) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700532 __ ldr(R3, Address(R4, 24, Address::Mode::Offset));
533 __ ldr(R3, Address(R4, 24, Address::Mode::PreIndex));
534 __ ldr(R3, Address(R4, 24, Address::Mode::PostIndex));
535 __ ldr(R3, Address(R4, 24, Address::Mode::NegOffset));
536 __ ldr(R3, Address(R4, 24, Address::Mode::NegPreIndex));
537 __ ldr(R3, Address(R4, 24, Address::Mode::NegPostIndex));
538
539 __ ldrb(R3, Address(R4, 24, Address::Mode::Offset));
540 __ ldrb(R3, Address(R4, 24, Address::Mode::PreIndex));
541 __ ldrb(R3, Address(R4, 24, Address::Mode::PostIndex));
542 __ ldrb(R3, Address(R4, 24, Address::Mode::NegOffset));
543 __ ldrb(R3, Address(R4, 24, Address::Mode::NegPreIndex));
544 __ ldrb(R3, Address(R4, 24, Address::Mode::NegPostIndex));
545
546 __ ldrh(R3, Address(R4, 24, Address::Mode::Offset));
547 __ ldrh(R3, Address(R4, 24, Address::Mode::PreIndex));
548 __ ldrh(R3, Address(R4, 24, Address::Mode::PostIndex));
549 __ ldrh(R3, Address(R4, 24, Address::Mode::NegOffset));
550 __ ldrh(R3, Address(R4, 24, Address::Mode::NegPreIndex));
551 __ ldrh(R3, Address(R4, 24, Address::Mode::NegPostIndex));
552
553 __ ldrsb(R3, Address(R4, 24, Address::Mode::Offset));
554 __ ldrsb(R3, Address(R4, 24, Address::Mode::PreIndex));
555 __ ldrsb(R3, Address(R4, 24, Address::Mode::PostIndex));
556 __ ldrsb(R3, Address(R4, 24, Address::Mode::NegOffset));
557 __ ldrsb(R3, Address(R4, 24, Address::Mode::NegPreIndex));
558 __ ldrsb(R3, Address(R4, 24, Address::Mode::NegPostIndex));
559
560 __ ldrsh(R3, Address(R4, 24, Address::Mode::Offset));
561 __ ldrsh(R3, Address(R4, 24, Address::Mode::PreIndex));
562 __ ldrsh(R3, Address(R4, 24, Address::Mode::PostIndex));
563 __ ldrsh(R3, Address(R4, 24, Address::Mode::NegOffset));
564 __ ldrsh(R3, Address(R4, 24, Address::Mode::NegPreIndex));
565 __ ldrsh(R3, Address(R4, 24, Address::Mode::NegPostIndex));
566
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000567 EmitAndCheck(&assembler, "ComplexLoad");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700568}
569
570
Vladimir Marko93205e32016-04-13 11:59:46 +0100571TEST_F(Thumb2AssemblerTest, ComplexStore) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700572 __ str(R3, Address(R4, 24, Address::Mode::Offset));
573 __ str(R3, Address(R4, 24, Address::Mode::PreIndex));
574 __ str(R3, Address(R4, 24, Address::Mode::PostIndex));
575 __ str(R3, Address(R4, 24, Address::Mode::NegOffset));
576 __ str(R3, Address(R4, 24, Address::Mode::NegPreIndex));
577 __ str(R3, Address(R4, 24, Address::Mode::NegPostIndex));
578
579 __ strb(R3, Address(R4, 24, Address::Mode::Offset));
580 __ strb(R3, Address(R4, 24, Address::Mode::PreIndex));
581 __ strb(R3, Address(R4, 24, Address::Mode::PostIndex));
582 __ strb(R3, Address(R4, 24, Address::Mode::NegOffset));
583 __ strb(R3, Address(R4, 24, Address::Mode::NegPreIndex));
584 __ strb(R3, Address(R4, 24, Address::Mode::NegPostIndex));
585
586 __ strh(R3, Address(R4, 24, Address::Mode::Offset));
587 __ strh(R3, Address(R4, 24, Address::Mode::PreIndex));
588 __ strh(R3, Address(R4, 24, Address::Mode::PostIndex));
589 __ strh(R3, Address(R4, 24, Address::Mode::NegOffset));
590 __ strh(R3, Address(R4, 24, Address::Mode::NegPreIndex));
591 __ strh(R3, Address(R4, 24, Address::Mode::NegPostIndex));
592
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000593 EmitAndCheck(&assembler, "ComplexStore");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700594}
595
Vladimir Marko93205e32016-04-13 11:59:46 +0100596TEST_F(Thumb2AssemblerTest, NegativeLoadStore) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700597 __ ldr(R3, Address(R4, -24, Address::Mode::Offset));
598 __ ldr(R3, Address(R4, -24, Address::Mode::PreIndex));
599 __ ldr(R3, Address(R4, -24, Address::Mode::PostIndex));
600 __ ldr(R3, Address(R4, -24, Address::Mode::NegOffset));
601 __ ldr(R3, Address(R4, -24, Address::Mode::NegPreIndex));
602 __ ldr(R3, Address(R4, -24, Address::Mode::NegPostIndex));
603
604 __ ldrb(R3, Address(R4, -24, Address::Mode::Offset));
605 __ ldrb(R3, Address(R4, -24, Address::Mode::PreIndex));
606 __ ldrb(R3, Address(R4, -24, Address::Mode::PostIndex));
607 __ ldrb(R3, Address(R4, -24, Address::Mode::NegOffset));
608 __ ldrb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
609 __ ldrb(R3, Address(R4, -24, Address::Mode::NegPostIndex));
610
611 __ ldrh(R3, Address(R4, -24, Address::Mode::Offset));
612 __ ldrh(R3, Address(R4, -24, Address::Mode::PreIndex));
613 __ ldrh(R3, Address(R4, -24, Address::Mode::PostIndex));
614 __ ldrh(R3, Address(R4, -24, Address::Mode::NegOffset));
615 __ ldrh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
616 __ ldrh(R3, Address(R4, -24, Address::Mode::NegPostIndex));
617
618 __ ldrsb(R3, Address(R4, -24, Address::Mode::Offset));
619 __ ldrsb(R3, Address(R4, -24, Address::Mode::PreIndex));
620 __ ldrsb(R3, Address(R4, -24, Address::Mode::PostIndex));
621 __ ldrsb(R3, Address(R4, -24, Address::Mode::NegOffset));
622 __ ldrsb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
623 __ ldrsb(R3, Address(R4, -24, Address::Mode::NegPostIndex));
624
625 __ ldrsh(R3, Address(R4, -24, Address::Mode::Offset));
626 __ ldrsh(R3, Address(R4, -24, Address::Mode::PreIndex));
627 __ ldrsh(R3, Address(R4, -24, Address::Mode::PostIndex));
628 __ ldrsh(R3, Address(R4, -24, Address::Mode::NegOffset));
629 __ ldrsh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
630 __ ldrsh(R3, Address(R4, -24, Address::Mode::NegPostIndex));
631
632 __ str(R3, Address(R4, -24, Address::Mode::Offset));
633 __ str(R3, Address(R4, -24, Address::Mode::PreIndex));
634 __ str(R3, Address(R4, -24, Address::Mode::PostIndex));
635 __ str(R3, Address(R4, -24, Address::Mode::NegOffset));
636 __ str(R3, Address(R4, -24, Address::Mode::NegPreIndex));
637 __ str(R3, Address(R4, -24, Address::Mode::NegPostIndex));
638
639 __ strb(R3, Address(R4, -24, Address::Mode::Offset));
640 __ strb(R3, Address(R4, -24, Address::Mode::PreIndex));
641 __ strb(R3, Address(R4, -24, Address::Mode::PostIndex));
642 __ strb(R3, Address(R4, -24, Address::Mode::NegOffset));
643 __ strb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
644 __ strb(R3, Address(R4, -24, Address::Mode::NegPostIndex));
645
646 __ strh(R3, Address(R4, -24, Address::Mode::Offset));
647 __ strh(R3, Address(R4, -24, Address::Mode::PreIndex));
648 __ strh(R3, Address(R4, -24, Address::Mode::PostIndex));
649 __ strh(R3, Address(R4, -24, Address::Mode::NegOffset));
650 __ strh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
651 __ strh(R3, Address(R4, -24, Address::Mode::NegPostIndex));
652
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000653 EmitAndCheck(&assembler, "NegativeLoadStore");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700654}
655
Vladimir Marko93205e32016-04-13 11:59:46 +0100656TEST_F(Thumb2AssemblerTest, SimpleLoadStoreDual) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700657 __ strd(R2, Address(R0, 24, Address::Mode::Offset));
658 __ ldrd(R2, Address(R0, 24, Address::Mode::Offset));
659
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000660 EmitAndCheck(&assembler, "SimpleLoadStoreDual");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700661}
662
Vladimir Marko93205e32016-04-13 11:59:46 +0100663TEST_F(Thumb2AssemblerTest, ComplexLoadStoreDual) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700664 __ strd(R2, Address(R0, 24, Address::Mode::Offset));
665 __ strd(R2, Address(R0, 24, Address::Mode::PreIndex));
666 __ strd(R2, Address(R0, 24, Address::Mode::PostIndex));
667 __ strd(R2, Address(R0, 24, Address::Mode::NegOffset));
668 __ strd(R2, Address(R0, 24, Address::Mode::NegPreIndex));
669 __ strd(R2, Address(R0, 24, Address::Mode::NegPostIndex));
670
671 __ ldrd(R2, Address(R0, 24, Address::Mode::Offset));
672 __ ldrd(R2, Address(R0, 24, Address::Mode::PreIndex));
673 __ ldrd(R2, Address(R0, 24, Address::Mode::PostIndex));
674 __ ldrd(R2, Address(R0, 24, Address::Mode::NegOffset));
675 __ ldrd(R2, Address(R0, 24, Address::Mode::NegPreIndex));
676 __ ldrd(R2, Address(R0, 24, Address::Mode::NegPostIndex));
677
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000678 EmitAndCheck(&assembler, "ComplexLoadStoreDual");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700679}
680
Vladimir Marko93205e32016-04-13 11:59:46 +0100681TEST_F(Thumb2AssemblerTest, NegativeLoadStoreDual) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700682 __ strd(R2, Address(R0, -24, Address::Mode::Offset));
683 __ strd(R2, Address(R0, -24, Address::Mode::PreIndex));
684 __ strd(R2, Address(R0, -24, Address::Mode::PostIndex));
685 __ strd(R2, Address(R0, -24, Address::Mode::NegOffset));
686 __ strd(R2, Address(R0, -24, Address::Mode::NegPreIndex));
687 __ strd(R2, Address(R0, -24, Address::Mode::NegPostIndex));
688
689 __ ldrd(R2, Address(R0, -24, Address::Mode::Offset));
690 __ ldrd(R2, Address(R0, -24, Address::Mode::PreIndex));
691 __ ldrd(R2, Address(R0, -24, Address::Mode::PostIndex));
692 __ ldrd(R2, Address(R0, -24, Address::Mode::NegOffset));
693 __ ldrd(R2, Address(R0, -24, Address::Mode::NegPreIndex));
694 __ ldrd(R2, Address(R0, -24, Address::Mode::NegPostIndex));
695
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000696 EmitAndCheck(&assembler, "NegativeLoadStoreDual");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700697}
698
Vladimir Marko93205e32016-04-13 11:59:46 +0100699TEST_F(Thumb2AssemblerTest, SimpleBranch) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700700 Label l1;
701 __ mov(R0, ShifterOperand(2));
702 __ Bind(&l1);
703 __ mov(R1, ShifterOperand(1));
704 __ b(&l1);
705 Label l2;
706 __ b(&l2);
707 __ mov(R1, ShifterOperand(2));
708 __ Bind(&l2);
709 __ mov(R0, ShifterOperand(3));
710
711 Label l3;
712 __ mov(R0, ShifterOperand(2));
713 __ Bind(&l3);
714 __ mov(R1, ShifterOperand(1));
715 __ b(&l3, EQ);
716
717 Label l4;
718 __ b(&l4, EQ);
719 __ mov(R1, ShifterOperand(2));
720 __ Bind(&l4);
721 __ mov(R0, ShifterOperand(3));
722
723 // 2 linked labels.
724 Label l5;
725 __ b(&l5);
726 __ mov(R1, ShifterOperand(4));
727 __ b(&l5);
728 __ mov(R1, ShifterOperand(5));
729 __ Bind(&l5);
730 __ mov(R0, ShifterOperand(6));
731
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000732 EmitAndCheck(&assembler, "SimpleBranch");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700733}
734
Vladimir Marko93205e32016-04-13 11:59:46 +0100735TEST_F(Thumb2AssemblerTest, LongBranch) {
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000736 __ Force32Bit();
Dave Allison65fcc2c2014-04-28 13:45:27 -0700737 // 32 bit branches.
738 Label l1;
739 __ mov(R0, ShifterOperand(2));
740 __ Bind(&l1);
741 __ mov(R1, ShifterOperand(1));
742 __ b(&l1);
743
744 Label l2;
745 __ b(&l2);
746 __ mov(R1, ShifterOperand(2));
747 __ Bind(&l2);
748 __ mov(R0, ShifterOperand(3));
749
750 Label l3;
751 __ mov(R0, ShifterOperand(2));
752 __ Bind(&l3);
753 __ mov(R1, ShifterOperand(1));
754 __ b(&l3, EQ);
755
756 Label l4;
757 __ b(&l4, EQ);
758 __ mov(R1, ShifterOperand(2));
759 __ Bind(&l4);
760 __ mov(R0, ShifterOperand(3));
761
762 // 2 linked labels.
763 Label l5;
764 __ b(&l5);
765 __ mov(R1, ShifterOperand(4));
766 __ b(&l5);
767 __ mov(R1, ShifterOperand(5));
768 __ Bind(&l5);
769 __ mov(R0, ShifterOperand(6));
770
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000771 EmitAndCheck(&assembler, "LongBranch");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700772}
773
Vladimir Marko93205e32016-04-13 11:59:46 +0100774TEST_F(Thumb2AssemblerTest, LoadMultiple) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700775 // 16 bit.
776 __ ldm(DB_W, R4, (1 << R0 | 1 << R3));
777
778 // 32 bit.
779 __ ldm(DB_W, R4, (1 << LR | 1 << R11));
780 __ ldm(DB, R4, (1 << LR | 1 << R11));
781
782 // Single reg is converted to ldr
783 __ ldm(DB_W, R4, (1 << R5));
784
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000785 EmitAndCheck(&assembler, "LoadMultiple");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700786}
787
Vladimir Marko93205e32016-04-13 11:59:46 +0100788TEST_F(Thumb2AssemblerTest, StoreMultiple) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700789 // 16 bit.
790 __ stm(IA_W, R4, (1 << R0 | 1 << R3));
791
792 // 32 bit.
793 __ stm(IA_W, R4, (1 << LR | 1 << R11));
794 __ stm(IA, R4, (1 << LR | 1 << R11));
795
796 // Single reg is converted to str
797 __ stm(IA_W, R4, (1 << R5));
798 __ stm(IA, R4, (1 << R5));
799
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000800 EmitAndCheck(&assembler, "StoreMultiple");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700801}
802
Vladimir Marko93205e32016-04-13 11:59:46 +0100803TEST_F(Thumb2AssemblerTest, MovWMovT) {
Vladimir Markob4536b72015-11-24 13:45:23 +0000804 // Always 32 bit.
805 __ movw(R4, 0);
806 __ movw(R4, 0x34);
807 __ movw(R9, 0x34);
808 __ movw(R3, 0x1234);
809 __ movw(R9, 0xffff);
Dave Allison65fcc2c2014-04-28 13:45:27 -0700810
811 // Always 32 bit.
812 __ movt(R0, 0);
813 __ movt(R0, 0x1234);
814 __ movt(R1, 0xffff);
815
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000816 EmitAndCheck(&assembler, "MovWMovT");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700817}
818
Vladimir Marko93205e32016-04-13 11:59:46 +0100819TEST_F(Thumb2AssemblerTest, SpecialAddSub) {
Dave Allison65fcc2c2014-04-28 13:45:27 -0700820 __ add(R2, SP, ShifterOperand(0x50)); // 16 bit.
821 __ add(SP, SP, ShifterOperand(0x50)); // 16 bit.
822 __ add(R8, SP, ShifterOperand(0x50)); // 32 bit.
823
824 __ add(R2, SP, ShifterOperand(0xf00)); // 32 bit due to imm size.
825 __ add(SP, SP, ShifterOperand(0xf00)); // 32 bit due to imm size.
Vladimir Marko6fd0ffe2015-11-19 21:13:52 +0000826 __ add(SP, SP, ShifterOperand(0xffc)); // 32 bit due to imm size; encoding T4.
Dave Allison65fcc2c2014-04-28 13:45:27 -0700827
Vladimir Marko6fd0ffe2015-11-19 21:13:52 +0000828 __ sub(SP, SP, ShifterOperand(0x50)); // 16 bit
829 __ sub(R0, SP, ShifterOperand(0x50)); // 32 bit
830 __ sub(R8, SP, ShifterOperand(0x50)); // 32 bit.
Dave Allison65fcc2c2014-04-28 13:45:27 -0700831
Vladimir Marko6fd0ffe2015-11-19 21:13:52 +0000832 __ sub(SP, SP, ShifterOperand(0xf00)); // 32 bit due to imm size
833 __ sub(SP, SP, ShifterOperand(0xffc)); // 32 bit due to imm size; encoding T4.
Dave Allison65fcc2c2014-04-28 13:45:27 -0700834
Vladimir Markocf93a5c2015-06-16 11:33:24 +0000835 EmitAndCheck(&assembler, "SpecialAddSub");
Dave Allison65fcc2c2014-04-28 13:45:27 -0700836}
837
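// LoadFromOffset() and StoreToOffset() are expected to split offsets that do
// not fit the load/store encoding, materializing the out-of-range part into a
// scratch register first; the large offsets below exercise that fallback.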
TEST_F(Thumb2AssemblerTest, LoadFromOffset) {
  __ LoadFromOffset(kLoadWord, R2, R4, 12);
  __ LoadFromOffset(kLoadWord, R2, R4, 0xfff);
  __ LoadFromOffset(kLoadWord, R2, R4, 0x1000);
  __ LoadFromOffset(kLoadWord, R2, R4, 0x1000a4);
  __ LoadFromOffset(kLoadWord, R2, R4, 0x101000);
  __ LoadFromOffset(kLoadWord, R4, R4, 0x101000);
  __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 12);
  __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0xfff);
  __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x1000);
  __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x1000a4);
  __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x101000);
  __ LoadFromOffset(kLoadUnsignedHalfword, R4, R4, 0x101000);
  __ LoadFromOffset(kLoadWordPair, R2, R4, 12);
  __ LoadFromOffset(kLoadWordPair, R2, R4, 0x3fc);
  __ LoadFromOffset(kLoadWordPair, R2, R4, 0x400);
  __ LoadFromOffset(kLoadWordPair, R2, R4, 0x400a4);
  __ LoadFromOffset(kLoadWordPair, R2, R4, 0x40400);
  __ LoadFromOffset(kLoadWordPair, R4, R4, 0x40400);

  __ LoadFromOffset(kLoadWord, R0, R12, 12);  // 32-bit because of R12.
  __ LoadFromOffset(kLoadWord, R2, R4, 0xa4 - 0x100000);

  __ LoadFromOffset(kLoadSignedByte, R2, R4, 12);
  __ LoadFromOffset(kLoadUnsignedByte, R2, R4, 12);
  __ LoadFromOffset(kLoadSignedHalfword, R2, R4, 12);

  EmitAndCheck(&assembler, "LoadFromOffset");
}

TEST_F(Thumb2AssemblerTest, StoreToOffset) {
  __ StoreToOffset(kStoreWord, R2, R4, 12);
  __ StoreToOffset(kStoreWord, R2, R4, 0xfff);
  __ StoreToOffset(kStoreWord, R2, R4, 0x1000);
  __ StoreToOffset(kStoreWord, R2, R4, 0x1000a4);
  __ StoreToOffset(kStoreWord, R2, R4, 0x101000);
  __ StoreToOffset(kStoreWord, R4, R4, 0x101000);
  __ StoreToOffset(kStoreHalfword, R2, R4, 12);
  __ StoreToOffset(kStoreHalfword, R2, R4, 0xfff);
  __ StoreToOffset(kStoreHalfword, R2, R4, 0x1000);
  __ StoreToOffset(kStoreHalfword, R2, R4, 0x1000a4);
  __ StoreToOffset(kStoreHalfword, R2, R4, 0x101000);
  __ StoreToOffset(kStoreHalfword, R4, R4, 0x101000);
  __ StoreToOffset(kStoreWordPair, R2, R4, 12);
  __ StoreToOffset(kStoreWordPair, R2, R4, 0x3fc);
  __ StoreToOffset(kStoreWordPair, R2, R4, 0x400);
  __ StoreToOffset(kStoreWordPair, R2, R4, 0x400a4);
  __ StoreToOffset(kStoreWordPair, R2, R4, 0x40400);
  __ StoreToOffset(kStoreWordPair, R4, R4, 0x40400);

  __ StoreToOffset(kStoreWord, R0, R12, 12);  // 32-bit because of R12.
  __ StoreToOffset(kStoreWord, R2, R4, 0xa4 - 0x100000);

  __ StoreToOffset(kStoreByte, R2, R4, 12);

  EmitAndCheck(&assembler, "StoreToOffset");
}

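// An IT (If-Then) instruction makes up to four following instructions
// conditional; the kItThen/kItElse arguments give the condition sense of the
// second and later instructions in the block.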
TEST_F(Thumb2AssemblerTest, IfThen) {
  __ it(EQ);
  __ mov(R1, ShifterOperand(1), EQ);

  __ it(EQ, kItThen);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), EQ);

  __ it(EQ, kItElse);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), NE);

  __ it(EQ, kItThen, kItElse);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), EQ);
  __ mov(R3, ShifterOperand(3), NE);

  __ it(EQ, kItElse, kItElse);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), NE);
  __ mov(R3, ShifterOperand(3), NE);

  __ it(EQ, kItThen, kItThen, kItElse);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), EQ);
  __ mov(R3, ShifterOperand(3), EQ);
  __ mov(R4, ShifterOperand(4), NE);

  EmitAndCheck(&assembler, "IfThen");
}

TEST_F(Thumb2AssemblerTest, CbzCbnz) {
  Label l1;
  __ cbz(R2, &l1);
  __ mov(R1, ShifterOperand(3));
  __ mov(R2, ShifterOperand(3));
  __ Bind(&l1);
  __ mov(R2, ShifterOperand(4));

  Label l2;
  __ cbnz(R2, &l2);
  __ mov(R8, ShifterOperand(3));
  __ mov(R2, ShifterOperand(3));
  __ Bind(&l2);
  __ mov(R2, ShifterOperand(4));

  EmitAndCheck(&assembler, "CbzCbnz");
}

TEST_F(Thumb2AssemblerTest, Multiply) {
  __ mul(R0, R1, R0);
  __ mul(R0, R1, R2);
  __ mul(R8, R9, R8);
  __ mul(R8, R9, R10);

  __ mla(R0, R1, R2, R3);
  __ mla(R8, R9, R8, R9);

  __ mls(R0, R1, R2, R3);
  __ mls(R8, R9, R8, R9);

  __ umull(R0, R1, R2, R3);
  __ umull(R8, R9, R10, R11);

  EmitAndCheck(&assembler, "Multiply");
}

TEST_F(Thumb2AssemblerTest, Divide) {
  __ sdiv(R0, R1, R2);
  __ sdiv(R8, R9, R10);

  __ udiv(R0, R1, R2);
  __ udiv(R8, R9, R10);

  EmitAndCheck(&assembler, "Divide");
}

TEST_F(Thumb2AssemblerTest, VMov) {
  __ vmovs(S1, 1.0);
  __ vmovd(D1, 1.0);

  __ vmovs(S1, S2);
  __ vmovd(D1, D2);

  EmitAndCheck(&assembler, "VMov");
}

TEST_F(Thumb2AssemblerTest, BasicFloatingPoint) {
  __ vadds(S0, S1, S2);
  __ vsubs(S0, S1, S2);
  __ vmuls(S0, S1, S2);
  __ vmlas(S0, S1, S2);
  __ vmlss(S0, S1, S2);
  __ vdivs(S0, S1, S2);
  __ vabss(S0, S1);
  __ vnegs(S0, S1);
  __ vsqrts(S0, S1);

  __ vaddd(D0, D1, D2);
  __ vsubd(D0, D1, D2);
  __ vmuld(D0, D1, D2);
  __ vmlad(D0, D1, D2);
  __ vmlsd(D0, D1, D2);
  __ vdivd(D0, D1, D2);
  __ vabsd(D0, D1);
  __ vnegd(D0, D1);
  __ vsqrtd(D0, D1);

  EmitAndCheck(&assembler, "BasicFloatingPoint");
}

TEST_F(Thumb2AssemblerTest, FloatingPointConversions) {
  __ vcvtsd(S2, D2);
  __ vcvtds(D2, S2);

  __ vcvtis(S1, S2);
  __ vcvtsi(S1, S2);

  __ vcvtid(S1, D2);
  __ vcvtdi(D1, S2);

  __ vcvtus(S1, S2);
  __ vcvtsu(S1, S2);

  __ vcvtud(S1, D2);
  __ vcvtdu(D1, S2);

  EmitAndCheck(&assembler, "FloatingPointConversions");
}

TEST_F(Thumb2AssemblerTest, FloatingPointComparisons) {
  __ vcmps(S0, S1);
  __ vcmpd(D0, D1);

  __ vcmpsz(S2);
  __ vcmpdz(D2);

  EmitAndCheck(&assembler, "FloatingPointComparisons");
}

TEST_F(Thumb2AssemblerTest, Calls) {
  __ blx(LR);
  __ bx(LR);

  EmitAndCheck(&assembler, "Calls");
}

TEST_F(Thumb2AssemblerTest, Breakpoint) {
  __ bkpt(0);

  EmitAndCheck(&assembler, "Breakpoint");
}

TEST_F(Thumb2AssemblerTest, StrR1) {
  __ str(R1, Address(SP, 68));
  __ str(R1, Address(SP, 1068));

  EmitAndCheck(&assembler, "StrR1");
}

TEST_F(Thumb2AssemblerTest, VPushPop) {
  __ vpushs(S2, 4);
  __ vpushd(D2, 4);

  __ vpops(S2, 4);
  __ vpopd(D2, 4);

  EmitAndCheck(&assembler, "VPushPop");
}

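// The loop below emits (1 << 11) bytes of 16-bit instructions, placing the
// branch target right at the limit of the 16-bit unconditional branch's
// range; Branch32 then adds two more bytes to force the 32-bit encoding.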
TEST_F(Thumb2AssemblerTest, Max16BitBranch) {
  Label l1;
  __ b(&l1);
  for (int i = 0; i < (1 << 11); i += 2) {
    __ mov(R3, ShifterOperand(i & 0xff));
  }
  __ Bind(&l1);
  __ mov(R1, ShifterOperand(R2));

  EmitAndCheck(&assembler, "Max16BitBranch");
}

TEST_F(Thumb2AssemblerTest, Branch32) {
  Label l1;
  __ b(&l1);
  for (int i = 0; i < (1 << 11) + 2; i += 2) {
    __ mov(R3, ShifterOperand(i & 0xff));
  }
  __ Bind(&l1);
  __ mov(R1, ShifterOperand(R2));

  EmitAndCheck(&assembler, "Branch32");
}

TEST_F(Thumb2AssemblerTest, CompareAndBranchMax) {
  Label l1;
  __ cbz(R4, &l1);
  for (int i = 0; i < (1 << 7); i += 2) {
    __ mov(R3, ShifterOperand(i & 0xff));
  }
  __ Bind(&l1);
  __ mov(R1, ShifterOperand(R2));

  EmitAndCheck(&assembler, "CompareAndBranchMax");
}

TEST_F(Thumb2AssemblerTest, CompareAndBranchRelocation16) {
  Label l1;
  __ cbz(R4, &l1);
  for (int i = 0; i < (1 << 7) + 2; i += 2) {
    __ mov(R3, ShifterOperand(i & 0xff));
  }
  __ Bind(&l1);
  __ mov(R1, ShifterOperand(R2));

  EmitAndCheck(&assembler, "CompareAndBranchRelocation16");
}

TEST_F(Thumb2AssemblerTest, CompareAndBranchRelocation32) {
  Label l1;
  __ cbz(R4, &l1);
  for (int i = 0; i < (1 << 11) + 2; i += 2) {
    __ mov(R3, ShifterOperand(i & 0xff));
  }
  __ Bind(&l1);
  __ mov(R1, ShifterOperand(R2));

  EmitAndCheck(&assembler, "CompareAndBranchRelocation32");
}

TEST_F(Thumb2AssemblerTest, MixedBranch32) {
  Label l1;
  Label l2;
  __ b(&l1);  // Forwards.
  __ Bind(&l2);

  // Space to force relocation.
  for (int i = 0; i < (1 << 11) + 2; i += 2) {
    __ mov(R3, ShifterOperand(i & 0xff));
  }
  __ b(&l2);  // Backwards.
  __ Bind(&l1);
  __ mov(R1, ShifterOperand(R2));

  EmitAndCheck(&assembler, "MixedBranch32");
}

TEST_F(Thumb2AssemblerTest, Shifts) {
  // 16 bit selected for the default kCcDontCare.
  __ Lsl(R0, R1, 5);
  __ Lsr(R0, R1, 5);
  __ Asr(R0, R1, 5);

  __ Lsl(R0, R0, R1);
  __ Lsr(R0, R0, R1);
  __ Asr(R0, R0, R1);
  __ Ror(R0, R0, R1);

  // 16 bit with kCcSet.
  __ Lsls(R0, R1, 5);
  __ Lsrs(R0, R1, 5);
  __ Asrs(R0, R1, 5);

  __ Lsls(R0, R0, R1);
  __ Lsrs(R0, R0, R1);
  __ Asrs(R0, R0, R1);
  __ Rors(R0, R0, R1);

  // 32-bit with kCcKeep.
  __ Lsl(R0, R1, 5, AL, kCcKeep);
  __ Lsr(R0, R1, 5, AL, kCcKeep);
  __ Asr(R0, R1, 5, AL, kCcKeep);

  __ Lsl(R0, R0, R1, AL, kCcKeep);
  __ Lsr(R0, R0, R1, AL, kCcKeep);
  __ Asr(R0, R0, R1, AL, kCcKeep);
  __ Ror(R0, R0, R1, AL, kCcKeep);

  // 32-bit because ROR immediate doesn't have a 16-bit version like the other shifts.
  __ Ror(R0, R1, 5);
  __ Rors(R0, R1, 5);
  __ Ror(R0, R1, 5, AL, kCcKeep);

  // 32 bit due to high registers.
  __ Lsl(R8, R1, 5);
  __ Lsr(R0, R8, 5);
  __ Asr(R8, R1, 5);
  __ Ror(R0, R8, 5);

  // 32 bit due to different Rd and Rn.
  __ Lsl(R0, R1, R2);
  __ Lsr(R0, R1, R2);
  __ Asr(R0, R1, R2);
  __ Ror(R0, R1, R2);

  // 32 bit due to use of high registers.
  __ Lsl(R8, R1, R2);
  __ Lsr(R0, R8, R2);
  __ Asr(R0, R1, R8);

  // S bit (all 32 bit)

  // 32 bit due to high registers.
  __ Lsls(R8, R1, 5);
  __ Lsrs(R0, R8, 5);
  __ Asrs(R8, R1, 5);
  __ Rors(R0, R8, 5);

  // 32 bit due to different Rd and Rn.
  __ Lsls(R0, R1, R2);
  __ Lsrs(R0, R1, R2);
  __ Asrs(R0, R1, R2);
  __ Rors(R0, R1, R2);

  // 32 bit due to use of high registers.
  __ Lsls(R8, R1, R2);
  __ Lsrs(R0, R8, R2);
  __ Asrs(R0, R1, R8);

  EmitAndCheck(&assembler, "Shifts");
}

TEST_F(Thumb2AssemblerTest, LoadStoreRegOffset) {
  // 16 bit.
  __ ldr(R0, Address(R1, R2));
  __ str(R0, Address(R1, R2));

  // 32 bit due to shift.
  __ ldr(R0, Address(R1, R2, LSL, 1));
  __ str(R0, Address(R1, R2, LSL, 1));

  __ ldr(R0, Address(R1, R2, LSL, 3));
  __ str(R0, Address(R1, R2, LSL, 3));

  // 32 bit due to high register use.
  __ ldr(R8, Address(R1, R2));
  __ str(R8, Address(R1, R2));

  __ ldr(R1, Address(R8, R2));
  __ str(R2, Address(R8, R2));

  __ ldr(R0, Address(R1, R8));
  __ str(R0, Address(R1, R8));

  EmitAndCheck(&assembler, "LoadStoreRegOffset");
}

TEST_F(Thumb2AssemblerTest, LoadStoreLiteral) {
  __ ldr(R0, Address(4));
  __ str(R0, Address(4));

  __ ldr(R0, Address(-8));
  __ str(R0, Address(-8));

  // Limits.
  __ ldr(R0, Address(0x3ff));  // 10 bits (16 bit).
  __ ldr(R0, Address(0x7ff));  // 11 bits (32 bit).
  __ str(R0, Address(0x3ff));  // 32 bit (no 16-bit str(literal)).
  __ str(R0, Address(0x7ff));  // 11 bits (32 bit).

  EmitAndCheck(&assembler, "LoadStoreLiteral");
}

TEST_F(Thumb2AssemblerTest, LoadStoreLimits) {
  __ ldr(R0, Address(R4, 124));   // 16 bit.
  __ ldr(R0, Address(R4, 128));   // 32 bit.

  __ ldrb(R0, Address(R4, 31));   // 16 bit.
  __ ldrb(R0, Address(R4, 32));   // 32 bit.

  __ ldrh(R0, Address(R4, 62));   // 16 bit.
  __ ldrh(R0, Address(R4, 64));   // 32 bit.

  __ ldrsb(R0, Address(R4, 31));  // 32 bit.
  __ ldrsb(R0, Address(R4, 32));  // 32 bit.

  __ ldrsh(R0, Address(R4, 62));  // 32 bit.
  __ ldrsh(R0, Address(R4, 64));  // 32 bit.

  __ str(R0, Address(R4, 124));   // 16 bit.
  __ str(R0, Address(R4, 128));   // 32 bit.

  __ strb(R0, Address(R4, 31));   // 16 bit.
  __ strb(R0, Address(R4, 32));   // 32 bit.

  __ strh(R0, Address(R4, 62));   // 16 bit.
  __ strh(R0, Address(R4, 64));   // 32 bit.

  EmitAndCheck(&assembler, "LoadStoreLimits");
}

TEST_F(Thumb2AssemblerTest, CompareAndBranch) {
  Label label;
  __ CompareAndBranchIfZero(arm::R0, &label);
  __ CompareAndBranchIfZero(arm::R11, &label);
  __ CompareAndBranchIfNonZero(arm::R0, &label);
  __ CompareAndBranchIfNonZero(arm::R11, &label);
  __ Bind(&label);

  EmitAndCheck(&assembler, "CompareAndBranch");
}

Vladimir Markof5c09c32015-12-17 12:08:08 +00001300 // Low registers, Rd != Rn.
1301 __ AddConstant(R0, R1, 0); // MOV.
1302 __ AddConstant(R0, R1, 1); // 16-bit ADDS, encoding T1.
1303 __ AddConstant(R0, R1, 7); // 16-bit ADDS, encoding T1.
1304 __ AddConstant(R0, R1, 8); // 32-bit ADD, encoding T3.
1305 __ AddConstant(R0, R1, 255); // 32-bit ADD, encoding T3.
1306 __ AddConstant(R0, R1, 256); // 32-bit ADD, encoding T3.
1307 __ AddConstant(R0, R1, 257); // 32-bit ADD, encoding T4.
1308 __ AddConstant(R0, R1, 0xfff); // 32-bit ADD, encoding T4.
1309 __ AddConstant(R0, R1, 0x1000); // 32-bit ADD, encoding T3.
1310 __ AddConstant(R0, R1, 0x1001); // MVN+SUB.
1311 __ AddConstant(R0, R1, 0x1002); // MOVW+ADD.
1312 __ AddConstant(R0, R1, 0xffff); // MOVW+ADD.
1313 __ AddConstant(R0, R1, 0x10000); // 32-bit ADD, encoding T3.
1314 __ AddConstant(R0, R1, 0x10001); // 32-bit ADD, encoding T3.
1315 __ AddConstant(R0, R1, 0x10002); // MVN+SUB.
1316 __ AddConstant(R0, R1, 0x10003); // MOVW+MOVT+ADD.
1317 __ AddConstant(R0, R1, -1); // 16-bit SUBS.
1318 __ AddConstant(R0, R1, -7); // 16-bit SUBS.
1319 __ AddConstant(R0, R1, -8); // 32-bit SUB, encoding T3.
1320 __ AddConstant(R0, R1, -255); // 32-bit SUB, encoding T3.
1321 __ AddConstant(R0, R1, -256); // 32-bit SUB, encoding T3.
1322 __ AddConstant(R0, R1, -257); // 32-bit SUB, encoding T4.
1323 __ AddConstant(R0, R1, -0xfff); // 32-bit SUB, encoding T4.
1324 __ AddConstant(R0, R1, -0x1000); // 32-bit SUB, encoding T3.
1325 __ AddConstant(R0, R1, -0x1001); // MVN+ADD.
1326 __ AddConstant(R0, R1, -0x1002); // MOVW+SUB.
1327 __ AddConstant(R0, R1, -0xffff); // MOVW+SUB.
1328 __ AddConstant(R0, R1, -0x10000); // 32-bit SUB, encoding T3.
1329 __ AddConstant(R0, R1, -0x10001); // 32-bit SUB, encoding T3.
1330 __ AddConstant(R0, R1, -0x10002); // MVN+ADD.
1331 __ AddConstant(R0, R1, -0x10003); // MOVW+MOVT+ADD.
1332
1333 // Low registers, Rd == Rn.
1334 __ AddConstant(R0, R0, 0); // Nothing.
1335 __ AddConstant(R1, R1, 1); // 16-bit ADDS, encoding T2,
1336 __ AddConstant(R0, R0, 7); // 16-bit ADDS, encoding T2.
1337 __ AddConstant(R1, R1, 8); // 16-bit ADDS, encoding T2.
1338 __ AddConstant(R0, R0, 255); // 16-bit ADDS, encoding T2.
1339 __ AddConstant(R1, R1, 256); // 32-bit ADD, encoding T3.
1340 __ AddConstant(R0, R0, 257); // 32-bit ADD, encoding T4.
1341 __ AddConstant(R1, R1, 0xfff); // 32-bit ADD, encoding T4.
1342 __ AddConstant(R0, R0, 0x1000); // 32-bit ADD, encoding T3.
1343 __ AddConstant(R1, R1, 0x1001); // MVN+SUB.
1344 __ AddConstant(R0, R0, 0x1002); // MOVW+ADD.
1345 __ AddConstant(R1, R1, 0xffff); // MOVW+ADD.
1346 __ AddConstant(R0, R0, 0x10000); // 32-bit ADD, encoding T3.
1347 __ AddConstant(R1, R1, 0x10001); // 32-bit ADD, encoding T3.
1348 __ AddConstant(R0, R0, 0x10002); // MVN+SUB.
1349 __ AddConstant(R1, R1, 0x10003); // MOVW+MOVT+ADD.
1350 __ AddConstant(R0, R0, -1); // 16-bit SUBS, encoding T2.
1351 __ AddConstant(R1, R1, -7); // 16-bit SUBS, encoding T2.
1352 __ AddConstant(R0, R0, -8); // 16-bit SUBS, encoding T2.
1353 __ AddConstant(R1, R1, -255); // 16-bit SUBS, encoding T2.
1354 __ AddConstant(R0, R0, -256); // 32-bit SUB, encoding T3.
1355 __ AddConstant(R1, R1, -257); // 32-bit SUB, encoding T4.
1356 __ AddConstant(R0, R0, -0xfff); // 32-bit SUB, encoding T4.
1357 __ AddConstant(R1, R1, -0x1000); // 32-bit SUB, encoding T3.
1358 __ AddConstant(R0, R0, -0x1001); // MVN+ADD.
1359 __ AddConstant(R1, R1, -0x1002); // MOVW+SUB.
1360 __ AddConstant(R0, R0, -0xffff); // MOVW+SUB.
1361 __ AddConstant(R1, R1, -0x10000); // 32-bit SUB, encoding T3.
1362 __ AddConstant(R0, R0, -0x10001); // 32-bit SUB, encoding T3.
1363 __ AddConstant(R1, R1, -0x10002); // MVN+ADD.
1364 __ AddConstant(R0, R0, -0x10003); // MOVW+MOVT+ADD.
1365
1366 // High registers.
1367 __ AddConstant(R8, R8, 0); // Nothing.
1368 __ AddConstant(R8, R1, 1); // 32-bit ADD, encoding T3,
1369 __ AddConstant(R0, R8, 7); // 32-bit ADD, encoding T3.
1370 __ AddConstant(R8, R8, 8); // 32-bit ADD, encoding T3.
1371 __ AddConstant(R8, R1, 255); // 32-bit ADD, encoding T3.
1372 __ AddConstant(R0, R8, 256); // 32-bit ADD, encoding T3.
1373 __ AddConstant(R8, R8, 257); // 32-bit ADD, encoding T4.
1374 __ AddConstant(R8, R1, 0xfff); // 32-bit ADD, encoding T4.
1375 __ AddConstant(R0, R8, 0x1000); // 32-bit ADD, encoding T3.
1376 __ AddConstant(R8, R8, 0x1001); // MVN+SUB.
1377 __ AddConstant(R0, R1, 0x1002); // MOVW+ADD.
1378 __ AddConstant(R0, R8, 0xffff); // MOVW+ADD.
1379 __ AddConstant(R8, R8, 0x10000); // 32-bit ADD, encoding T3.
1380 __ AddConstant(R8, R1, 0x10001); // 32-bit ADD, encoding T3.
1381 __ AddConstant(R0, R8, 0x10002); // MVN+SUB.
1382 __ AddConstant(R0, R8, 0x10003); // MOVW+MOVT+ADD.
1383 __ AddConstant(R8, R8, -1); // 32-bit ADD, encoding T3.
1384 __ AddConstant(R8, R1, -7); // 32-bit SUB, encoding T3.
1385 __ AddConstant(R0, R8, -8); // 32-bit SUB, encoding T3.
1386 __ AddConstant(R8, R8, -255); // 32-bit SUB, encoding T3.
1387 __ AddConstant(R8, R1, -256); // 32-bit SUB, encoding T3.
1388 __ AddConstant(R0, R8, -257); // 32-bit SUB, encoding T4.
1389 __ AddConstant(R8, R8, -0xfff); // 32-bit SUB, encoding T4.
1390 __ AddConstant(R8, R1, -0x1000); // 32-bit SUB, encoding T3.
1391 __ AddConstant(R0, R8, -0x1001); // MVN+ADD.
1392 __ AddConstant(R0, R1, -0x1002); // MOVW+SUB.
1393 __ AddConstant(R8, R1, -0xffff); // MOVW+SUB.
1394 __ AddConstant(R0, R8, -0x10000); // 32-bit SUB, encoding T3.
1395 __ AddConstant(R8, R8, -0x10001); // 32-bit SUB, encoding T3.
1396 __ AddConstant(R8, R1, -0x10002); // MVN+SUB.
1397 __ AddConstant(R0, R8, -0x10003); // MOVW+MOVT+ADD.
1398
1399 // Low registers, Rd != Rn, kCcKeep.
1400 __ AddConstant(R0, R1, 0, AL, kCcKeep); // MOV.
1401 __ AddConstant(R0, R1, 1, AL, kCcKeep); // 32-bit ADD, encoding T3.
1402 __ AddConstant(R0, R1, 7, AL, kCcKeep); // 32-bit ADD, encoding T3.
1403 __ AddConstant(R0, R1, 8, AL, kCcKeep); // 32-bit ADD, encoding T3.
1404 __ AddConstant(R0, R1, 255, AL, kCcKeep); // 32-bit ADD, encoding T3.
1405 __ AddConstant(R0, R1, 256, AL, kCcKeep); // 32-bit ADD, encoding T3.
1406 __ AddConstant(R0, R1, 257, AL, kCcKeep); // 32-bit ADD, encoding T4.
1407 __ AddConstant(R0, R1, 0xfff, AL, kCcKeep); // 32-bit ADD, encoding T4.
1408 __ AddConstant(R0, R1, 0x1000, AL, kCcKeep); // 32-bit ADD, encoding T3.
1409 __ AddConstant(R0, R1, 0x1001, AL, kCcKeep); // MVN+SUB.
1410 __ AddConstant(R0, R1, 0x1002, AL, kCcKeep); // MOVW+ADD.
1411 __ AddConstant(R0, R1, 0xffff, AL, kCcKeep); // MOVW+ADD.
1412 __ AddConstant(R0, R1, 0x10000, AL, kCcKeep); // 32-bit ADD, encoding T3.
1413 __ AddConstant(R0, R1, 0x10001, AL, kCcKeep); // 32-bit ADD, encoding T3.
1414 __ AddConstant(R0, R1, 0x10002, AL, kCcKeep); // MVN+SUB.
1415 __ AddConstant(R0, R1, 0x10003, AL, kCcKeep); // MOVW+MOVT+ADD.
1416 __ AddConstant(R0, R1, -1, AL, kCcKeep); // 32-bit ADD, encoding T3.
1417 __ AddConstant(R0, R1, -7, AL, kCcKeep); // 32-bit SUB, encoding T3.
1418 __ AddConstant(R0, R1, -8, AL, kCcKeep); // 32-bit SUB, encoding T3.
1419 __ AddConstant(R0, R1, -255, AL, kCcKeep); // 32-bit SUB, encoding T3.
1420 __ AddConstant(R0, R1, -256, AL, kCcKeep); // 32-bit SUB, encoding T3.
1421 __ AddConstant(R0, R1, -257, AL, kCcKeep); // 32-bit SUB, encoding T4.
1422 __ AddConstant(R0, R1, -0xfff, AL, kCcKeep); // 32-bit SUB, encoding T4.
1423 __ AddConstant(R0, R1, -0x1000, AL, kCcKeep); // 32-bit SUB, encoding T3.
1424 __ AddConstant(R0, R1, -0x1001, AL, kCcKeep); // MVN+ADD.
1425 __ AddConstant(R0, R1, -0x1002, AL, kCcKeep); // MOVW+SUB.
1426 __ AddConstant(R0, R1, -0xffff, AL, kCcKeep); // MOVW+SUB.
1427 __ AddConstant(R0, R1, -0x10000, AL, kCcKeep); // 32-bit SUB, encoding T3.
1428 __ AddConstant(R0, R1, -0x10001, AL, kCcKeep); // 32-bit SUB, encoding T3.
1429 __ AddConstant(R0, R1, -0x10002, AL, kCcKeep); // MVN+ADD.
1430 __ AddConstant(R0, R1, -0x10003, AL, kCcKeep); // MOVW+MOVT+ADD.
1431
1432 // Low registers, Rd == Rn, kCcKeep.
1433 __ AddConstant(R0, R0, 0, AL, kCcKeep); // Nothing.
1434 __ AddConstant(R1, R1, 1, AL, kCcKeep); // 32-bit ADD, encoding T3.
1435 __ AddConstant(R0, R0, 7, AL, kCcKeep); // 32-bit ADD, encoding T3.
1436 __ AddConstant(R1, R1, 8, AL, kCcKeep); // 32-bit ADD, encoding T3.
1437 __ AddConstant(R0, R0, 255, AL, kCcKeep); // 32-bit ADD, encoding T3.
1438 __ AddConstant(R1, R1, 256, AL, kCcKeep); // 32-bit ADD, encoding T3.
1439 __ AddConstant(R0, R0, 257, AL, kCcKeep); // 32-bit ADD, encoding T4.
1440 __ AddConstant(R1, R1, 0xfff, AL, kCcKeep); // 32-bit ADD, encoding T4.
1441 __ AddConstant(R0, R0, 0x1000, AL, kCcKeep); // 32-bit ADD, encoding T3.
1442 __ AddConstant(R1, R1, 0x1001, AL, kCcKeep); // MVN+SUB.
1443 __ AddConstant(R0, R0, 0x1002, AL, kCcKeep); // MOVW+ADD.
1444 __ AddConstant(R1, R1, 0xffff, AL, kCcKeep); // MOVW+ADD.
1445 __ AddConstant(R0, R0, 0x10000, AL, kCcKeep); // 32-bit ADD, encoding T3.
1446 __ AddConstant(R1, R1, 0x10001, AL, kCcKeep); // 32-bit ADD, encoding T3.
1447 __ AddConstant(R0, R0, 0x10002, AL, kCcKeep); // MVN+SUB.
1448 __ AddConstant(R1, R1, 0x10003, AL, kCcKeep); // MOVW+MOVT+ADD.
1449 __ AddConstant(R0, R0, -1, AL, kCcKeep); // 32-bit ADD, encoding T3.
1450 __ AddConstant(R1, R1, -7, AL, kCcKeep); // 32-bit SUB, encoding T3.
1451 __ AddConstant(R0, R0, -8, AL, kCcKeep); // 32-bit SUB, encoding T3.
1452 __ AddConstant(R1, R1, -255, AL, kCcKeep); // 32-bit SUB, encoding T3.
1453 __ AddConstant(R0, R0, -256, AL, kCcKeep); // 32-bit SUB, encoding T3.
1454 __ AddConstant(R1, R1, -257, AL, kCcKeep); // 32-bit SUB, encoding T4.
1455 __ AddConstant(R0, R0, -0xfff, AL, kCcKeep); // 32-bit SUB, encoding T4.
1456 __ AddConstant(R1, R1, -0x1000, AL, kCcKeep); // 32-bit SUB, encoding T3.
1457 __ AddConstant(R0, R0, -0x1001, AL, kCcKeep); // MVN+ADD.
1458 __ AddConstant(R1, R1, -0x1002, AL, kCcKeep); // MOVW+SUB.
1459 __ AddConstant(R0, R0, -0xffff, AL, kCcKeep); // MOVW+SUB.
1460 __ AddConstant(R1, R1, -0x10000, AL, kCcKeep); // 32-bit SUB, encoding T3.
1461 __ AddConstant(R0, R0, -0x10001, AL, kCcKeep); // 32-bit SUB, encoding T3.
1462 __ AddConstant(R1, R1, -0x10002, AL, kCcKeep); // MVN+ADD.
1463 __ AddConstant(R0, R0, -0x10003, AL, kCcKeep); // MOVW+MOVT+ADD.
1464
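  // Note: there is no flag-setting counterpart of the 12-bit immediate (T4)
  // encodings, so under kCcSet the values that used ADDW/SUBW above degrade
  // to MVN or MOVW sequences ending in a flag-setting ADDS/SUBS.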
  // Low registers, Rd != Rn, kCcSet.
  __ AddConstant(R0, R1, 0, AL, kCcSet);          // 16-bit ADDS.
  __ AddConstant(R0, R1, 1, AL, kCcSet);          // 16-bit ADDS.
  __ AddConstant(R0, R1, 7, AL, kCcSet);          // 16-bit ADDS.
  __ AddConstant(R0, R1, 8, AL, kCcSet);          // 32-bit ADDS, encoding T3.
  __ AddConstant(R0, R1, 255, AL, kCcSet);        // 32-bit ADDS, encoding T3.
  __ AddConstant(R0, R1, 256, AL, kCcSet);        // 32-bit ADDS, encoding T3.
  __ AddConstant(R0, R1, 257, AL, kCcSet);        // MVN+SUBS.
  __ AddConstant(R0, R1, 0xfff, AL, kCcSet);      // MOVW+ADDS.
  __ AddConstant(R0, R1, 0x1000, AL, kCcSet);     // 32-bit ADDS, encoding T3.
  __ AddConstant(R0, R1, 0x1001, AL, kCcSet);     // MVN+SUBS.
  __ AddConstant(R0, R1, 0x1002, AL, kCcSet);     // MOVW+ADDS.
  __ AddConstant(R0, R1, 0xffff, AL, kCcSet);     // MOVW+ADDS.
  __ AddConstant(R0, R1, 0x10000, AL, kCcSet);    // 32-bit ADDS, encoding T3.
  __ AddConstant(R0, R1, 0x10001, AL, kCcSet);    // 32-bit ADDS, encoding T3.
  __ AddConstant(R0, R1, 0x10002, AL, kCcSet);    // MVN+SUBS.
  __ AddConstant(R0, R1, 0x10003, AL, kCcSet);    // MOVW+MOVT+ADDS.
  __ AddConstant(R0, R1, -1, AL, kCcSet);         // 16-bit SUBS.
  __ AddConstant(R0, R1, -7, AL, kCcSet);         // 16-bit SUBS.
  __ AddConstant(R0, R1, -8, AL, kCcSet);         // 32-bit SUBS, encoding T3.
  __ AddConstant(R0, R1, -255, AL, kCcSet);       // 32-bit SUBS, encoding T3.
  __ AddConstant(R0, R1, -256, AL, kCcSet);       // 32-bit SUBS, encoding T3.
  __ AddConstant(R0, R1, -257, AL, kCcSet);       // MVN+ADDS.
  __ AddConstant(R0, R1, -0xfff, AL, kCcSet);     // MOVW+SUBS.
  __ AddConstant(R0, R1, -0x1000, AL, kCcSet);    // 32-bit SUBS, encoding T3.
  __ AddConstant(R0, R1, -0x1001, AL, kCcSet);    // MVN+ADDS.
  __ AddConstant(R0, R1, -0x1002, AL, kCcSet);    // MOVW+SUBS.
  __ AddConstant(R0, R1, -0xffff, AL, kCcSet);    // MOVW+SUBS.
  __ AddConstant(R0, R1, -0x10000, AL, kCcSet);   // 32-bit SUBS, encoding T3.
  __ AddConstant(R0, R1, -0x10001, AL, kCcSet);   // 32-bit SUBS, encoding T3.
  __ AddConstant(R0, R1, -0x10002, AL, kCcSet);   // MVN+ADDS.
  __ AddConstant(R0, R1, -0x10003, AL, kCcSet);   // MOVW+MOVT+ADDS.

  // Low registers, Rd == Rn, kCcSet.
  __ AddConstant(R0, R0, 0, AL, kCcSet);          // 16-bit ADDS, encoding T2.
  __ AddConstant(R1, R1, 1, AL, kCcSet);          // 16-bit ADDS, encoding T2.
  __ AddConstant(R0, R0, 7, AL, kCcSet);          // 16-bit ADDS, encoding T2.
  __ AddConstant(R1, R1, 8, AL, kCcSet);          // 16-bit ADDS, encoding T2.
  __ AddConstant(R0, R0, 255, AL, kCcSet);        // 16-bit ADDS, encoding T2.
  __ AddConstant(R1, R1, 256, AL, kCcSet);        // 32-bit ADDS, encoding T3.
  __ AddConstant(R0, R0, 257, AL, kCcSet);        // MVN+SUBS.
  __ AddConstant(R1, R1, 0xfff, AL, kCcSet);      // MOVW+ADDS.
  __ AddConstant(R0, R0, 0x1000, AL, kCcSet);     // 32-bit ADDS, encoding T3.
  __ AddConstant(R1, R1, 0x1001, AL, kCcSet);     // MVN+SUBS.
  __ AddConstant(R0, R0, 0x1002, AL, kCcSet);     // MOVW+ADDS.
  __ AddConstant(R1, R1, 0xffff, AL, kCcSet);     // MOVW+ADDS.
  __ AddConstant(R0, R0, 0x10000, AL, kCcSet);    // 32-bit ADDS, encoding T3.
  __ AddConstant(R1, R1, 0x10001, AL, kCcSet);    // 32-bit ADDS, encoding T3.
  __ AddConstant(R0, R0, 0x10002, AL, kCcSet);    // MVN+SUBS.
  __ AddConstant(R1, R1, 0x10003, AL, kCcSet);    // MOVW+MOVT+ADDS.
  __ AddConstant(R0, R0, -1, AL, kCcSet);         // 16-bit SUBS, encoding T2.
  __ AddConstant(R1, R1, -7, AL, kCcSet);         // 16-bit SUBS, encoding T2.
  __ AddConstant(R0, R0, -8, AL, kCcSet);         // 16-bit SUBS, encoding T2.
  __ AddConstant(R1, R1, -255, AL, kCcSet);       // 16-bit SUBS, encoding T2.
  __ AddConstant(R0, R0, -256, AL, kCcSet);       // 32-bit SUBS, encoding T3.
  __ AddConstant(R1, R1, -257, AL, kCcSet);       // MVN+ADDS.
  __ AddConstant(R0, R0, -0xfff, AL, kCcSet);     // MOVW+SUBS.
  __ AddConstant(R1, R1, -0x1000, AL, kCcSet);    // 32-bit SUBS, encoding T3.
  __ AddConstant(R0, R0, -0x1001, AL, kCcSet);    // MVN+ADDS.
  __ AddConstant(R1, R1, -0x1002, AL, kCcSet);    // MOVW+SUBS.
  __ AddConstant(R0, R0, -0xffff, AL, kCcSet);    // MOVW+SUBS.
  __ AddConstant(R1, R1, -0x10000, AL, kCcSet);   // 32-bit SUBS, encoding T3.
  __ AddConstant(R0, R0, -0x10001, AL, kCcSet);   // 32-bit SUBS, encoding T3.
  __ AddConstant(R1, R1, -0x10002, AL, kCcSet);   // MVN+ADDS.
  __ AddConstant(R0, R0, -0x10003, AL, kCcSet);   // MOVW+MOVT+ADDS.

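  // Inside an IT block the 16-bit ADD encodings do not set flags, so the
  // usual preference inverts: kCcSet forces a 32-bit ADDS even for #1, while
  // kCcKeep can use the short 16-bit form.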
  __ it(EQ);
  __ AddConstant(R0, R1, 1, EQ, kCcSet);          // 32-bit ADDS, encoding T3.
  __ it(NE);
  __ AddConstant(R0, R1, 1, NE, kCcKeep);         // 16-bit ADDS, encoding T1.
  __ it(GE);
  __ AddConstant(R0, R0, 1, GE, kCcSet);          // 32-bit ADDS, encoding T3.
  __ it(LE);
  __ AddConstant(R0, R0, 1, LE, kCcKeep);         // 16-bit ADDS, encoding T2.

  EmitAndCheck(&assembler, "AddConstant");
}
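
// The sequences annotated in AddConstant follow a fixed cascade once the
// zero/MOV special cases and the 16-bit shortcuts are exhausted. The sketch
// below reconstructs that ordering for a positive delta with
// flags-don't-care semantics; IsThumb2ModifiedImmediate approximates the
// architectural "modified immediate" test, and all names here are
// illustrative, not the assembler's real internals.
static bool IsThumb2ModifiedImmediate(uint32_t value) {
  if (value <= 0xffu) {
    return true;                          // 0x000000ab.
  }
  uint32_t lo = value & 0xffu;
  if (value == ((lo << 16) | lo)) {
    return true;                          // 0x00ab00ab.
  }
  if (value == lo * 0x01010101u) {
    return true;                          // 0xabababab.
  }
  uint32_t hi = value & 0xff00u;
  if (value == ((hi << 16) | hi)) {
    return true;                          // 0xab00ab00.
  }
  // Rotated form: all set bits must fit in one 8-bit window whose top bit
  // (the payload's bit 7) lands on bit 8..31 of the value.
  int msb = 31 - __builtin_clz(value);    // value > 0xff here, so non-zero.
  return msb >= 8 && (value & ~(0xffu << (msb - 7))) == 0u;
}

enum class AddStrategy { kAddT3, kAddT4, kMvnSub, kMovwAdd, kMovwMovtAdd };
static AddStrategy SelectAddStrategy(uint32_t value) {
  if (IsThumb2ModifiedImmediate(value)) {
    return AddStrategy::kAddT3;           // add rd, rn, #modified_immediate.
  }
  if (value <= 0xfffu) {
    return AddStrategy::kAddT4;           // addw rd, rn, #imm12.
  }
  if (IsThumb2ModifiedImmediate(value - 1u)) {
    // mvn ip, #(value - 1) materializes ~(value - 1) == -value, so a
    // following sub rd, rn, ip computes rn + value.
    return AddStrategy::kMvnSub;
  }
  if (value <= 0xffffu) {
    return AddStrategy::kMovwAdd;         // movw ip, #value; add rd, rn, ip.
  }
  return AddStrategy::kMovwMovtAdd;       // Full 32-bit materialization.
}
// Negative deltas mirror this with ADD and SUB swapped; with kCcSet the T4
// step disappears because ADDW/SUBW cannot set flags.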

TEST_F(Thumb2AssemblerTest, CmpConstant) {
  __ CmpConstant(R0, 0);          // 16-bit CMP.
  __ CmpConstant(R1, 1);          // 16-bit CMP.
  __ CmpConstant(R0, 7);          // 16-bit CMP.
  __ CmpConstant(R1, 8);          // 16-bit CMP.
  __ CmpConstant(R0, 255);        // 16-bit CMP.
  __ CmpConstant(R1, 256);        // 32-bit CMP.
  __ CmpConstant(R0, 257);        // MVN+CMN.
  __ CmpConstant(R1, 0xfff);      // MOVW+CMP.
  __ CmpConstant(R0, 0x1000);     // 32-bit CMP.
  __ CmpConstant(R1, 0x1001);     // MVN+CMN.
  __ CmpConstant(R0, 0x1002);     // MOVW+CMP.
  __ CmpConstant(R1, 0xffff);     // MOVW+CMP.
  __ CmpConstant(R0, 0x10000);    // 32-bit CMP.
  __ CmpConstant(R1, 0x10001);    // 32-bit CMP.
  __ CmpConstant(R0, 0x10002);    // MVN+CMN.
  __ CmpConstant(R1, 0x10003);    // MOVW+MOVT+CMP.
  __ CmpConstant(R0, -1);         // 32-bit CMP.
  __ CmpConstant(R1, -7);         // CMN.
  __ CmpConstant(R0, -8);         // CMN.
  __ CmpConstant(R1, -255);       // CMN.
  __ CmpConstant(R0, -256);       // CMN.
  __ CmpConstant(R1, -257);       // MVN+CMP.
  __ CmpConstant(R0, -0xfff);     // MOVW+CMN.
  __ CmpConstant(R1, -0x1000);    // CMN.
  __ CmpConstant(R0, -0x1001);    // MVN+CMP.
  __ CmpConstant(R1, -0x1002);    // MOVW+CMN.
  __ CmpConstant(R0, -0xffff);    // MOVW+CMN.
  __ CmpConstant(R1, -0x10000);   // CMN.
  __ CmpConstant(R0, -0x10001);   // CMN.
  __ CmpConstant(R1, -0x10002);   // MVN+CMP.
  __ CmpConstant(R0, -0x10003);   // MOVW+MOVT+CMP.

  __ CmpConstant(R8, 0);          // 32-bit CMP.
  __ CmpConstant(R9, 1);          // 32-bit CMP.
  __ CmpConstant(R8, 7);          // 32-bit CMP.
  __ CmpConstant(R9, 8);          // 32-bit CMP.
  __ CmpConstant(R8, 255);        // 32-bit CMP.
  __ CmpConstant(R9, 256);        // 32-bit CMP.
  __ CmpConstant(R8, 257);        // MVN+CMN.
  __ CmpConstant(R9, 0xfff);      // MOVW+CMP.
  __ CmpConstant(R8, 0x1000);     // 32-bit CMP.
  __ CmpConstant(R9, 0x1001);     // MVN+CMN.
  __ CmpConstant(R8, 0x1002);     // MOVW+CMP.
  __ CmpConstant(R9, 0xffff);     // MOVW+CMP.
  __ CmpConstant(R8, 0x10000);    // 32-bit CMP.
  __ CmpConstant(R9, 0x10001);    // 32-bit CMP.
  __ CmpConstant(R8, 0x10002);    // MVN+CMN.
  __ CmpConstant(R9, 0x10003);    // MOVW+MOVT+CMP.
  __ CmpConstant(R8, -1);         // 32-bit CMP.
  __ CmpConstant(R9, -7);         // CMN.
  __ CmpConstant(R8, -8);         // CMN.
  __ CmpConstant(R9, -255);       // CMN.
  __ CmpConstant(R8, -256);       // CMN.
  __ CmpConstant(R9, -257);       // MVN+CMP.
  __ CmpConstant(R8, -0xfff);     // MOVW+CMN.
  __ CmpConstant(R9, -0x1000);    // CMN.
  __ CmpConstant(R8, -0x1001);    // MVN+CMP.
  __ CmpConstant(R9, -0x1002);    // MOVW+CMN.
  __ CmpConstant(R8, -0xffff);    // MOVW+CMN.
  __ CmpConstant(R9, -0x10000);   // CMN.
  __ CmpConstant(R8, -0x10001);   // CMN.
  __ CmpConstant(R9, -0x10002);   // MVN+CMP.
  __ CmpConstant(R8, -0x10003);   // MOVW+MOVT+CMP.

  EmitAndCheck(&assembler, "CmpConstant");
}
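
// CmpConstant only needs the flags, which adds CMN to the toolbox: when the
// negated constant is encodable, a compare-negative replaces the compare,
// and MVN can materialize the operand for a register-register CMP/CMN. A
// hedged sketch of the selection, reusing the IsThumb2ModifiedImmediate
// approximation above (illustrative only, not the assembler's real logic):
static const char* SelectCmpStrategy(Register rn, int32_t value) {
  if (rn < R8 && value >= 0 && value <= 255) {
    return "16-bit CMP";          // cmp rn, #imm8.
  }
  uint32_t v = static_cast<uint32_t>(value);
  uint32_t neg = 0u - v;
  if (IsThumb2ModifiedImmediate(v)) {
    return "32-bit CMP";          // cmp rn, #modified_immediate.
  }
  if (IsThumb2ModifiedImmediate(neg)) {
    return "CMN";                 // cmn rn, #-value.
  }
  if (IsThumb2ModifiedImmediate(v - 1u)) {
    return "MVN+CMN";             // mvn ip, #(value - 1); cmn rn, ip.
  }
  if (IsThumb2ModifiedImmediate(~v)) {
    return "MVN+CMP";             // mvn ip, #~value; cmp rn, ip.
  }
  if (v <= 0xffffu) {
    return "MOVW+CMP";            // movw ip, #value; cmp rn, ip.
  }
  if (neg <= 0xffffu) {
    return "MOVW+CMN";            // movw ip, #-value; cmn rn, ip.
  }
  return "MOVW+MOVT+CMP";         // Full 32-bit materialization.
}
// Applied to the cases above, this reproduces every annotation, e.g.
// 257 -> MVN+CMN (256 is encodable), -0x1001 -> MVN+CMP (~(-0x1001) ==
// 0x1000), and -0xfff -> MOVW+CMN (0xfff fits in 16 bits).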

#undef __
}  // namespace arm
}  // namespace art