blob: b499dddb0cf7637a19a17fe7d18462f6add681f8 [file] [log] [blame]
Dave Allison65fcc2c2014-04-28 13:45:27 -07001/*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "assembler_thumb2.h"
18
Vladimir Marko80afd022015-05-19 18:08:00 +010019#include "base/bit_utils.h"
Dave Allison65fcc2c2014-04-28 13:45:27 -070020#include "base/logging.h"
21#include "entrypoints/quick/quick_entrypoints.h"
22#include "offsets.h"
23#include "thread.h"
Dave Allison65fcc2c2014-04-28 13:45:27 -070024
25namespace art {
26namespace arm {
27
void Thumb2Assembler::Fixup::PrepareDependents(Thumb2Assembler* assembler) {
  // For each Fixup, it's easy to find the Fixups that it depends on as they are either
  // the following or the preceding Fixups until we find the target. However, for fixup
  // adjustment we need the reverse lookup, i.e. what Fixups depend on a given Fixup.
  // This function creates a compact representation of this relationship, where we have
  // all the dependents in a single array and Fixups reference their ranges by start
  // index and count. (Instead of having a per-fixup vector.)

  // Pass 1: count the number of dependents of each Fixup. A forward branch
  // (target > location) depends on every Fixup between it and the target;
  // a backward branch depends on every Fixup from the target up to itself.
  const FixupId end_id = assembler->fixups_.size();
  Fixup* fixups = assembler->fixups_.data();
  for (FixupId fixup_id = 0u; fixup_id != end_id; ++fixup_id) {
    uint32_t target = fixups[fixup_id].target_;
    if (target > fixups[fixup_id].location_) {
      for (FixupId id = fixup_id + 1u; id != end_id && fixups[id].location_ < target; ++id) {
        fixups[id].dependents_count_ += 1u;
      }
    } else {
      for (FixupId id = fixup_id; id != 0u && fixups[id - 1u].location_ >= target; --id) {
        fixups[id - 1u].dependents_count_ += 1u;
      }
    }
  }
  // Assign index ranges in fixup_dependents_ to individual fixups. Record the end of the
  // range in dependents_start_, we shall later decrement it as we fill in fixup_dependents_.
  uint32_t number_of_dependents = 0u;
  for (FixupId fixup_id = 0u; fixup_id != end_id; ++fixup_id) {
    number_of_dependents += fixups[fixup_id].dependents_count_;
    fixups[fixup_id].dependents_start_ = number_of_dependents;
  }
  if (number_of_dependents == 0u) {
    return;  // No dependents at all; skip the allocation and fill pass.
  }
  // Pass 2: create and fill in the fixup_dependents_. Each write decrements the
  // owner's dependents_start_, so after this pass dependents_start_ points at the
  // first element of that Fixup's range, as the name implies.
  assembler->fixup_dependents_.reset(new FixupId[number_of_dependents]);
  FixupId* dependents = assembler->fixup_dependents_.get();
  for (FixupId fixup_id = 0u; fixup_id != end_id; ++fixup_id) {
    uint32_t target = fixups[fixup_id].target_;
    if (target > fixups[fixup_id].location_) {
      for (FixupId id = fixup_id + 1u; id != end_id && fixups[id].location_ < target; ++id) {
        fixups[id].dependents_start_ -= 1u;
        dependents[fixups[id].dependents_start_] = fixup_id;
      }
    } else {
      for (FixupId id = fixup_id; id != 0u && fixups[id - 1u].location_ >= target; --id) {
        fixups[id - 1u].dependents_start_ -= 1u;
        dependents[fixups[id - 1u].dependents_start_] = fixup_id;
      }
    }
  }
}
79
// Binds `label` to `bound_pc` and resolves every Fixup linked through it.
// For a linked label, Position() holds the id of the first Fixup in the chain
// and the 16-bit placeholder stored at each Fixup's location holds the id of
// the next one; the placeholders are cleared back to 0 as we walk the chain.
void Thumb2Assembler::BindLabel(Label* label, uint32_t bound_pc) {
  CHECK(!label->IsBound());

  while (label->IsLinked()) {
    FixupId fixup_id = label->Position();                     // The id for linked Fixup.
    Fixup* fixup = GetFixup(fixup_id);                        // Get the Fixup at this id.
    fixup->Resolve(bound_pc);                                 // Fixup can be resolved now.
    uint32_t fixup_location = fixup->GetLocation();
    uint16_t next = buffer_.Load<uint16_t>(fixup_location);   // Get next in chain.
    buffer_.Store<int16_t>(fixup_location, 0);                // Reset placeholder to zero.
    label->position_ = next;                                  // Move to next.
  }
  label->BindTo(bound_pc);
}
94
95void Thumb2Assembler::BindLiterals() {
96 // We don't add the padding here, that's done only after adjusting the Fixup sizes.
97 uint32_t code_size = buffer_.Size();
98 for (Literal& lit : literals_) {
99 Label* label = lit.GetLabel();
100 BindLabel(label, code_size);
101 code_size += lit.GetSize();
102 }
103}
104
// If `fixup` needs to grow at the current code size, apply the growth and
// propagate it to all Fixups that depend on this one, queueing each of them
// for recalculation at most once. The 16-bit placeholder in buffer_ at a
// Fixup's location doubles as the "already queued" marker (0 = not queued).
void Thumb2Assembler::AdjustFixupIfNeeded(Fixup* fixup, uint32_t* current_code_size,
                                          std::deque<FixupId>* fixups_to_recalculate) {
  uint32_t adjustment = fixup->AdjustSizeIfNeeded(*current_code_size);
  if (adjustment != 0u) {
    *current_code_size += adjustment;
    for (FixupId dependent_id : fixup->Dependents(*this)) {
      Fixup* dependent = GetFixup(dependent_id);
      dependent->IncreaseAdjustment(adjustment);
      if (buffer_.Load<int16_t>(dependent->GetLocation()) == 0) {
        buffer_.Store<int16_t>(dependent->GetLocation(), 1);  // Mark as queued.
        fixups_to_recalculate->push_back(dependent_id);
      }
    }
  }
}
120
// Iterates Fixup size adjustment to a fixed point (a grown branch can push
// other branches out of range, which may grow them in turn) and returns the
// final code size. Also rebinds literal labels to account for the alignment
// padding that will precede the literal pool.
uint32_t Thumb2Assembler::AdjustFixups() {
  Fixup::PrepareDependents(this);
  uint32_t current_code_size = buffer_.Size();
  std::deque<FixupId> fixups_to_recalculate;
  if (kIsDebugBuild) {
    // We will use the placeholders in the buffer_ to mark whether the fixup has
    // been added to the fixups_to_recalculate. Make sure we start with zeros.
    for (Fixup& fixup : fixups_) {
      CHECK_EQ(buffer_.Load<int16_t>(fixup.GetLocation()), 0);
    }
  }
  // Seed the worklist with any fixups that already need to grow.
  for (Fixup& fixup : fixups_) {
    AdjustFixupIfNeeded(&fixup, &current_code_size, &fixups_to_recalculate);
  }
  while (!fixups_to_recalculate.empty()) {
    do {
      // Pop the fixup.
      FixupId fixup_id = fixups_to_recalculate.front();
      fixups_to_recalculate.pop_front();
      Fixup* fixup = GetFixup(fixup_id);
      DCHECK_NE(buffer_.Load<int16_t>(fixup->GetLocation()), 0);
      buffer_.Store<int16_t>(fixup->GetLocation(), 0);  // Clear the "queued" marker.
      // See if it needs adjustment.
      AdjustFixupIfNeeded(fixup, &current_code_size, &fixups_to_recalculate);
    } while (!fixups_to_recalculate.empty());

    if ((current_code_size & 2) != 0 && !literals_.empty()) {
      // If we need to add padding before literals, this may just push some out of range,
      // so recalculate all load literals. This makes up for the fact that we don't mark
      // load literal as a dependency of all previous Fixups even though it actually is.
      for (Fixup& fixup : fixups_) {
        if (fixup.IsLoadLiteral()) {
          AdjustFixupIfNeeded(&fixup, &current_code_size, &fixups_to_recalculate);
        }
      }
    }
  }
  if (kIsDebugBuild) {
    // Check that no fixup is marked as being in fixups_to_recalculate anymore.
    for (Fixup& fixup : fixups_) {
      CHECK_EQ(buffer_.Load<int16_t>(fixup.GetLocation()), 0);
    }
  }

  // Adjust literal pool labels for padding.
  DCHECK_ALIGNED(current_code_size, 2);
  uint32_t literals_adjustment = current_code_size + (current_code_size & 2) - buffer_.Size();
  if (literals_adjustment != 0u) {
    for (Literal& literal : literals_) {
      Label* label = literal.GetLabel();
      DCHECK(label->IsBound());
      int old_position = label->Position();
      label->Reinitialize();
      label->BindTo(old_position + literals_adjustment);
    }
  }

  return current_code_size;
}
180
// Emits all Fixups at their final (adjusted) locations, sliding the
// intervening non-fixup code into place within the same buffer.
void Thumb2Assembler::EmitFixups(uint32_t adjusted_code_size) {
  // Move non-fixup code to its final place and emit fixups.
  // Process fixups in reverse order so that we don't repeatedly move the same data.
  size_t src_end = buffer_.Size();
  size_t dest_end = adjusted_code_size;
  buffer_.Resize(dest_end);
  DCHECK_GE(dest_end, src_end);
  for (auto i = fixups_.rbegin(), end = fixups_.rend(); i != end; ++i) {
    Fixup* fixup = &*i;
    if (fixup->GetOriginalSize() == fixup->GetSize()) {
      // The size of this Fixup didn't change. To avoid moving the data
      // in small chunks, emit the code to its original position.
      fixup->Emit(&buffer_, adjusted_code_size);
      fixup->Finalize(dest_end - src_end);
    } else {
      // Move the data between the end of the fixup and src_end to its final location.
      size_t old_fixup_location = fixup->GetLocation();
      size_t src_begin = old_fixup_location + fixup->GetOriginalSizeInBytes();
      size_t data_size = src_end - src_begin;
      size_t dest_begin = dest_end - data_size;
      buffer_.Move(dest_begin, src_begin, data_size);
      src_end = old_fixup_location;
      dest_end = dest_begin - fixup->GetSizeInBytes();
      // Finalize the Fixup and emit the data to the new location.
      fixup->Finalize(dest_end - src_end);
      fixup->Emit(&buffer_, adjusted_code_size);
    }
  }
  // After processing every fixup the two cursors must meet.
  CHECK_EQ(src_end, dest_end);
}
211
// Appends the literal pool (if any) after the code, inserting a 2-byte
// padding half-word first when needed to reach 4-byte alignment.
void Thumb2Assembler::EmitLiterals() {
  if (!literals_.empty()) {
    // Load literal instructions (LDR, LDRD, VLDR) require 4-byte alignment.
    // We don't support byte and half-word literals.
    uint32_t code_size = buffer_.Size();
    DCHECK_ALIGNED(code_size, 2);
    if ((code_size & 2u) != 0u) {
      Emit16(0);  // Alignment padding.
    }
    for (Literal& literal : literals_) {
      AssemblerBuffer::EnsureCapacity ensured(&buffer_);
      // The label must have been rebound to exactly this emission position.
      DCHECK_EQ(static_cast<size_t>(literal.GetLabel()->Position()), buffer_.Size());
      DCHECK(literal.GetSize() == 4u || literal.GetSize() == 8u);
      for (size_t i = 0, size = literal.GetSize(); i != size; ++i) {
        buffer_.Emit<uint8_t>(literal.GetData()[i]);
      }
    }
  }
}
231
// 16-bit B encoding: conditional (encoding T1, imm8) when cond != AL,
// unconditional (encoding T2, imm11) otherwise. `offset` is in bytes.
inline int16_t Thumb2Assembler::BEncoding16(int32_t offset, Condition cond) {
  DCHECK_ALIGNED(offset, 2);
  int16_t encoding = B15 | B14;
  if (cond != AL) {
    DCHECK(IsInt<9>(offset));
    // cond goes to bits 8-11, halfword offset to bits 0-7.
    encoding |= B12 | (static_cast<int32_t>(cond) << 8) | ((offset >> 1) & 0xff);
  } else {
    DCHECK(IsInt<12>(offset));
    // Halfword offset goes to bits 0-10.
    encoding |= B13 | ((offset >> 1) & 0x7ff);
  }
  return encoding;
}
244
// 32-bit B encoding: conditional (imm6/imm11 with J1, J2) when cond != AL,
// unconditional (imm10/imm11 with I1/I2 folded into J1/J2) otherwise.
// `offset` is in bytes.
inline int32_t Thumb2Assembler::BEncoding32(int32_t offset, Condition cond) {
  DCHECK_ALIGNED(offset, 2);
  int32_t s = (offset >> 31) & 1;  // Sign bit.
  int32_t encoding = B31 | B30 | B29 | B28 | B15 |
      (s << 26) |                  // Sign bit goes to bit 26.
      ((offset >> 1) & 0x7ff);     // imm11 goes to bits 0-10.
  if (cond != AL) {
    DCHECK(IsInt<21>(offset));
    // Encode cond, move imm6 from bits 12-17 to bits 16-21 and move J1 and J2.
    encoding |= (static_cast<int32_t>(cond) << 22) | ((offset & 0x3f000) << (16 - 12)) |
        ((offset & (1 << 19)) >> (19 - 13)) |  // Extract J1 from bit 19 to bit 13.
        ((offset & (1 << 18)) >> (18 - 11));   // Extract J2 from bit 18 to bit 11.
  } else {
    DCHECK(IsInt<25>(offset));
    int32_t j1 = ((offset >> 23) ^ s ^ 1) & 1;  // Calculate J1 from I1 extracted from bit 23.
    int32_t j2 = ((offset >> 22) ^ s ^ 1) & 1;  // Calculate J2 from I2 extracted from bit 22.
    // Move imm10 from bits 12-21 to bits 16-25 and add J1 and J2.
    encoding |= B12 | ((offset & 0x3ff000) << (16 - 12)) |
        (j1 << 13) | (j2 << 11);
  }
  return encoding;
}
267
// 16-bit CBZ (cond == EQ) / CBNZ (cond == NE) encoding; rn must be a low
// register and `offset` a forward byte offset representable in 7 bits.
inline int16_t Thumb2Assembler::CbxzEncoding16(Register rn, int32_t offset, Condition cond) {
  DCHECK(!IsHighRegister(rn));
  DCHECK_ALIGNED(offset, 2);
  DCHECK(IsUint<7>(offset));
  DCHECK(cond == EQ || cond == NE);
  return B15 | B13 | B12 | B8 | (cond == NE ? B11 : 0) | static_cast<int32_t>(rn) |
      ((offset & 0x3e) << (3 - 1)) |  // Move imm5 from bits 1-5 to bits 3-7.
      ((offset & 0x40) << (9 - 6));   // Move i from bit 6 to bit 11.
}
277
278inline int16_t Thumb2Assembler::CmpRnImm8Encoding16(Register rn, int32_t value) {
279 DCHECK(!IsHighRegister(rn));
280 DCHECK(IsUint<8>(value));
281 return B13 | B11 | (rn << 8) | value;
282}
283
// 16-bit ADD (register) encoding with rdn = rdn + rm; supports high registers
// by splitting rdn into its low 3 bits and a separate high bit.
inline int16_t Thumb2Assembler::AddRdnRmEncoding16(Register rdn, Register rm) {
  // The high bit of rn is moved across 4-bit rm.
  return B14 | B10 | (static_cast<int32_t>(rm) << 3) |
      (static_cast<int32_t>(rdn) & 7) | ((static_cast<int32_t>(rdn) & 8) << 4);
}
289
// 32-bit MOVW encoding: loads the 16-bit immediate `value` into rd,
// splitting it into the imm4:i:imm3:imm8 fields.
inline int32_t Thumb2Assembler::MovwEncoding32(Register rd, int32_t value) {
  DCHECK(IsUint<16>(value));
  return B31 | B30 | B29 | B28 | B25 | B22 |
      (static_cast<int32_t>(rd) << 8) |
      ((value & 0xf000) << (16 - 12)) |  // Move imm4 from bits 12-15 to bits 16-19.
      ((value & 0x0800) << (26 - 11)) |  // Move i from bit 11 to bit 26.
      ((value & 0x0700) << (12 - 8)) |   // Move imm3 from bits 8-10 to bits 12-14.
      (value & 0xff);                    // Keep imm8 in bits 0-7.
}
299
300inline int32_t Thumb2Assembler::MovtEncoding32(Register rd, int32_t value) {
301 DCHECK_EQ(value & 0xffff, 0);
302 int32_t movw_encoding = MovwEncoding32(rd, (value >> 16) & 0xffff);
303 return movw_encoding | B25 | B23;
304}
305
// 32-bit MOV (modified immediate) encoding; `value` must be representable
// as a Thumb2 modified immediate (checked via ModifiedImmediate).
inline int32_t Thumb2Assembler::MovModImmEncoding32(Register rd, int32_t value) {
  uint32_t mod_imm = ModifiedImmediate(value);
  DCHECK_NE(mod_imm, kInvalidModifiedImmediate);
  return B31 | B30 | B29 | B28 | B22 | B19 | B18 | B17 | B16 |
      (static_cast<int32_t>(rd) << 8) | static_cast<int32_t>(mod_imm);
}
312
// 16-bit LDR (literal) encoding: low register rt, word-aligned positive
// `offset` (in bytes) encoded as a word offset in bits 0-7.
inline int16_t Thumb2Assembler::LdrLitEncoding16(Register rt, int32_t offset) {
  DCHECK(!IsHighRegister(rt));
  DCHECK_ALIGNED(offset, 4);
  DCHECK(IsUint<10>(offset));
  return B14 | B11 | (static_cast<int32_t>(rt) << 8) | (offset >> 2);
}
319
// 32-bit LDR (literal) encoding, expressed as an LDR with PC base.
inline int32_t Thumb2Assembler::LdrLitEncoding32(Register rt, int32_t offset) {
  // NOTE: We don't support negative offset, i.e. U=0 (B23).
  return LdrRtRnImm12Encoding(rt, PC, offset);
}
324
// 32-bit LDRD encoding: loads rt and rt2 from [rn + offset]; `offset` is a
// word-aligned byte offset, stored as a word count in bits 0-7 (P=1, U=1, W=0).
inline int32_t Thumb2Assembler::LdrdEncoding32(Register rt, Register rt2, Register rn, int32_t offset) {
  DCHECK_ALIGNED(offset, 4);
  CHECK(IsUint<10>(offset));
  return B31 | B30 | B29 | B27 |
      B24 /* P = 1 */ | B23 /* U = 1 */ | B22 | 0 /* W = 0 */ | B20 |
      (static_cast<int32_t>(rn) << 16) | (static_cast<int32_t>(rt) << 12) |
      (static_cast<int32_t>(rt2) << 8) | (offset >> 2);
}
333
// 32-bit VLDR.32 encoding: loads single-precision sd from [rn + offset];
// `offset` is a word-aligned, non-negative byte offset (U=1).
inline int32_t Thumb2Assembler::VldrsEncoding32(SRegister sd, Register rn, int32_t offset) {
  DCHECK_ALIGNED(offset, 4);
  CHECK(IsUint<10>(offset));
  return B31 | B30 | B29 | B27 | B26 | B24 |
      B23 /* U = 1 */ | B20 | B11 | B9 |
      (static_cast<int32_t>(rn) << 16) |
      ((static_cast<int32_t>(sd) & 0x01) << (22 - 0)) |  // Move D from bit 0 to bit 22.
      ((static_cast<int32_t>(sd) & 0x1e) << (12 - 1)) |  // Move Vd from bits 1-4 to bits 12-15.
      (offset >> 2);
}
344
// 32-bit VLDR.64 encoding: loads double-precision dd from [rn + offset];
// `offset` is a word-aligned, non-negative byte offset (U=1).
inline int32_t Thumb2Assembler::VldrdEncoding32(DRegister dd, Register rn, int32_t offset) {
  DCHECK_ALIGNED(offset, 4);
  CHECK(IsUint<10>(offset));
  return B31 | B30 | B29 | B27 | B26 | B24 |
      B23 /* U = 1 */ | B20 | B11 | B9 | B8 |
      (rn << 16) |
      ((static_cast<int32_t>(dd) & 0x10) << (22 - 4)) |  // Move D from bit 4 to bit 22.
      ((static_cast<int32_t>(dd) & 0x0f) << (12 - 0)) |  // Move Vd from bits 0-3 to bits 12-15.
      (offset >> 2);
}
355
// 16-bit LDR (immediate) encoding: low registers only, word-aligned `offset`
// (in bytes) encoded as a 5-bit word offset in bits 6-10.
inline int16_t Thumb2Assembler::LdrRtRnImm5Encoding16(Register rt, Register rn, int32_t offset) {
  DCHECK(!IsHighRegister(rt));
  DCHECK(!IsHighRegister(rn));
  DCHECK_ALIGNED(offset, 4);
  DCHECK(IsUint<7>(offset));
  return B14 | B13 | B11 |
      (static_cast<int32_t>(rn) << 3) | static_cast<int32_t>(rt) |
      (offset << (6 - 2));  // Move imm5 from bits 2-6 to bits 6-10.
}
365
// Selects the 32-bit load encoding matching this Fixup's wide/FP literal type,
// loading from [rbase + offset]. Aborts on any other Fixup type.
int32_t Thumb2Assembler::Fixup::LoadWideOrFpEncoding(Register rbase, int32_t offset) const {
  switch (type_) {
    case kLoadLiteralWide:
      return LdrdEncoding32(rn_, rt2_, rbase, offset);
    case kLoadFPLiteralSingle:
      return VldrsEncoding32(sd_, rbase, offset);
    case kLoadFPLiteralDouble:
      return VldrdEncoding32(dd_, rbase, offset);
    default:
      LOG(FATAL) << "Unexpected type: " << static_cast<int>(type_);
      UNREACHABLE();
  }
}
379
// 32-bit LDR (immediate) encoding with a 12-bit unsigned byte offset.
inline int32_t Thumb2Assembler::LdrRtRnImm12Encoding(Register rt, Register rn, int32_t offset) {
  DCHECK(IsUint<12>(offset));
  return B31 | B30 | B29 | B28 | B27 | B23 | B22 | B20 | (rn << 16) | (rt << 12) | offset;
}
384
// Finalization pipeline: bind literal labels, iterate Fixup sizes to a fixed
// point, emit fixups at their final locations, then append the literal pool.
void Thumb2Assembler::FinalizeCode() {
  ArmAssembler::FinalizeCode();
  BindLiterals();
  uint32_t adjusted_code_size = AdjustFixups();
  EmitFixups(adjusted_code_size);
  EmitLiterals();
}
392
// Returns whether `immediate` can be encoded as a shifter operand for
// `opcode`, filling in *shifter_op either way. ADD/SUB additionally accept
// any value below 2^12 (the plain 12-bit immediate forms); everything else
// requires a Thumb2 modified immediate. rd/rn are currently unused.
bool Thumb2Assembler::ShifterOperandCanHold(Register rd ATTRIBUTE_UNUSED,
                                            Register rn ATTRIBUTE_UNUSED,
                                            Opcode opcode,
                                            uint32_t immediate,
                                            ShifterOperand* shifter_op) {
  shifter_op->type_ = ShifterOperand::kImmediate;
  shifter_op->immed_ = immediate;
  shifter_op->is_shift_ = false;
  shifter_op->is_rotate_ = false;
  switch (opcode) {
    case ADD:
    case SUB:
      if (immediate < (1 << 12)) {  // Less than (or equal to) 12 bits can always be done.
        return true;
      }
      return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;

    case MOV:
      // TODO: Support less than or equal to 12bits.
      return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
    case MVN:
    default:
      // NOTE(review): MVN and default share the same check; kept separate cases
      // to preserve the original structure.
      return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
  }
}
418
// rd = rn & so; does not set flags (S bit argument is 0).
void Thumb2Assembler::and_(Register rd, Register rn, const ShifterOperand& so,
                           Condition cond) {
  EmitDataProcessing(cond, AND, 0, rn, rd, so);
}
423
424
// rd = rn ^ so; does not set flags.
void Thumb2Assembler::eor(Register rd, Register rn, const ShifterOperand& so,
                          Condition cond) {
  EmitDataProcessing(cond, EOR, 0, rn, rd, so);
}
429
430
// rd = rn - so; does not set flags.
void Thumb2Assembler::sub(Register rd, Register rn, const ShifterOperand& so,
                          Condition cond) {
  EmitDataProcessing(cond, SUB, 0, rn, rd, so);
}
435
436
// rd = so - rn (reverse subtract); does not set flags.
void Thumb2Assembler::rsb(Register rd, Register rn, const ShifterOperand& so,
                          Condition cond) {
  EmitDataProcessing(cond, RSB, 0, rn, rd, so);
}
441
442
// rd = so - rn (reverse subtract), setting flags (S bit argument is 1).
void Thumb2Assembler::rsbs(Register rd, Register rn, const ShifterOperand& so,
                           Condition cond) {
  EmitDataProcessing(cond, RSB, 1, rn, rd, so);
}
447
448
// rd = rn + so; does not set flags.
void Thumb2Assembler::add(Register rd, Register rn, const ShifterOperand& so,
                          Condition cond) {
  EmitDataProcessing(cond, ADD, 0, rn, rd, so);
}
453
454
// rd = rn + so, setting flags.
void Thumb2Assembler::adds(Register rd, Register rn, const ShifterOperand& so,
                           Condition cond) {
  EmitDataProcessing(cond, ADD, 1, rn, rd, so);
}
459
460
// rd = rn - so, setting flags.
void Thumb2Assembler::subs(Register rd, Register rn, const ShifterOperand& so,
                           Condition cond) {
  EmitDataProcessing(cond, SUB, 1, rn, rd, so);
}
465
466
// rd = rn + so + carry; does not set flags.
void Thumb2Assembler::adc(Register rd, Register rn, const ShifterOperand& so,
                          Condition cond) {
  EmitDataProcessing(cond, ADC, 0, rn, rd, so);
}
471
472
// rd = rn - so - !carry (subtract with carry); does not set flags.
void Thumb2Assembler::sbc(Register rd, Register rn, const ShifterOperand& so,
                          Condition cond) {
  EmitDataProcessing(cond, SBC, 0, rn, rd, so);
}
477
478
// rd = so - rn - !carry (reverse subtract with carry); does not set flags.
void Thumb2Assembler::rsc(Register rd, Register rn, const ShifterOperand& so,
                          Condition cond) {
  EmitDataProcessing(cond, RSC, 0, rn, rd, so);
}
483
484
// Sets flags from rn & so; R0 is a placeholder destination.
void Thumb2Assembler::tst(Register rn, const ShifterOperand& so, Condition cond) {
  CHECK_NE(rn, PC);  // Reserve tst pc instruction for exception handler marker.
  EmitDataProcessing(cond, TST, 1, rn, R0, so);
}
489
490
// Sets flags from rn ^ so; R0 is a placeholder destination.
void Thumb2Assembler::teq(Register rn, const ShifterOperand& so, Condition cond) {
  CHECK_NE(rn, PC);  // Reserve teq pc instruction for exception handler marker.
  EmitDataProcessing(cond, TEQ, 1, rn, R0, so);
}
495
496
// Sets flags from rn - so; R0 is a placeholder destination.
void Thumb2Assembler::cmp(Register rn, const ShifterOperand& so, Condition cond) {
  EmitDataProcessing(cond, CMP, 1, rn, R0, so);
}
500
501
// Sets flags from rn + so; R0 is a placeholder destination.
void Thumb2Assembler::cmn(Register rn, const ShifterOperand& so, Condition cond) {
  EmitDataProcessing(cond, CMN, 1, rn, R0, so);
}
505
506
// rd = rn | so; does not set flags.
void Thumb2Assembler::orr(Register rd, Register rn,
                          const ShifterOperand& so, Condition cond) {
  EmitDataProcessing(cond, ORR, 0, rn, rd, so);
}
511
512
// rd = rn | so, setting flags.
void Thumb2Assembler::orrs(Register rd, Register rn,
                           const ShifterOperand& so, Condition cond) {
  EmitDataProcessing(cond, ORR, 1, rn, rd, so);
}
517
518
// rd = so; does not set flags. R0 is a placeholder source register.
void Thumb2Assembler::mov(Register rd, const ShifterOperand& so, Condition cond) {
  EmitDataProcessing(cond, MOV, 0, R0, rd, so);
}
522
523
// rd = so, setting flags. R0 is a placeholder source register.
void Thumb2Assembler::movs(Register rd, const ShifterOperand& so, Condition cond) {
  EmitDataProcessing(cond, MOV, 1, R0, rd, so);
}
527
528
// rd = rn & ~so (bit clear); does not set flags.
void Thumb2Assembler::bic(Register rd, Register rn, const ShifterOperand& so,
                          Condition cond) {
  EmitDataProcessing(cond, BIC, 0, rn, rd, so);
}
533
534
// rd = ~so; does not set flags. R0 is a placeholder source register.
void Thumb2Assembler::mvn(Register rd, const ShifterOperand& so, Condition cond) {
  EmitDataProcessing(cond, MVN, 0, R0, rd, so);
}
538
539
// rd = ~so, setting flags. R0 is a placeholder source register.
void Thumb2Assembler::mvns(Register rd, const ShifterOperand& so, Condition cond) {
  EmitDataProcessing(cond, MVN, 1, R0, rd, so);
}
543
544
// rd = rn * rm. Uses the 16-bit MULS form when rd == rm, both operands are
// low registers and 32-bit encoding is not forced; otherwise the 32-bit MUL.
void Thumb2Assembler::mul(Register rd, Register rn, Register rm, Condition cond) {
  CheckCondition(cond);

  if (rd == rm && !IsHighRegister(rd) && !IsHighRegister(rn) && !force_32bit_) {
    // 16 bit.
    int16_t encoding = B14 | B9 | B8 | B6 |
        rn << 3 | rd;
    Emit16(encoding);
  } else {
    // 32 bit.
    uint32_t op1 = 0U /* 0b000 */;
    uint32_t op2 = 0U /* 0b00 */;
    int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 |
        op1 << 20 |
        B15 | B14 | B13 | B12 |
        op2 << 4 |
        static_cast<uint32_t>(rd) << 8 |
        static_cast<uint32_t>(rn) << 16 |
        static_cast<uint32_t>(rm);

    Emit32(encoding);
  }
}
568
569
// rd = ra + rn * rm (multiply-accumulate); 32-bit encoding only.
void Thumb2Assembler::mla(Register rd, Register rn, Register rm, Register ra,
                          Condition cond) {
  CheckCondition(cond);

  uint32_t op1 = 0U /* 0b000 */;
  uint32_t op2 = 0U /* 0b00 */;
  int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 |
      op1 << 20 |
      op2 << 4 |
      static_cast<uint32_t>(rd) << 8 |
      static_cast<uint32_t>(ra) << 12 |
      static_cast<uint32_t>(rn) << 16 |
      static_cast<uint32_t>(rm);

  Emit32(encoding);
}
586
587
588void Thumb2Assembler::mls(Register rd, Register rn, Register rm, Register ra,
589 Condition cond) {
Ian Rogers6a3c1fc2014-10-31 00:33:20 -0700590 CheckCondition(cond);
591
Andreas Gampec8ccf682014-09-29 20:07:43 -0700592 uint32_t op1 = 0U /* 0b000 */;
593 uint32_t op2 = 01 /* 0b01 */;
Dave Allison65fcc2c2014-04-28 13:45:27 -0700594 int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 |
595 op1 << 20 |
596 op2 << 4 |
597 static_cast<uint32_t>(rd) << 8 |
598 static_cast<uint32_t>(ra) << 12 |
599 static_cast<uint32_t>(rn) << 16 |
600 static_cast<uint32_t>(rm);
601
602 Emit32(encoding);
603}
604
605
// rd_hi:rd_lo = rn * rm (signed 64-bit multiply); 32-bit encoding only.
void Thumb2Assembler::smull(Register rd_lo, Register rd_hi, Register rn,
                            Register rm, Condition cond) {
  CheckCondition(cond);

  uint32_t op1 = 0U /* 0b000; */;
  uint32_t op2 = 0U /* 0b0000 */;
  int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 | B23 |
      op1 << 20 |
      op2 << 4 |
      static_cast<uint32_t>(rd_lo) << 12 |
      static_cast<uint32_t>(rd_hi) << 8 |
      static_cast<uint32_t>(rn) << 16 |
      static_cast<uint32_t>(rm);

  Emit32(encoding);
}
622
623
// rd_hi:rd_lo = rn * rm (unsigned 64-bit multiply); 32-bit encoding only.
void Thumb2Assembler::umull(Register rd_lo, Register rd_hi, Register rn,
                            Register rm, Condition cond) {
  CheckCondition(cond);

  uint32_t op1 = 2U /* 0b010; */;
  uint32_t op2 = 0U /* 0b0000 */;
  int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 | B23 |
      op1 << 20 |
      op2 << 4 |
      static_cast<uint32_t>(rd_lo) << 12 |
      static_cast<uint32_t>(rd_hi) << 8 |
      static_cast<uint32_t>(rn) << 16 |
      static_cast<uint32_t>(rm);

  Emit32(encoding);
}
640
641
// rd = rn / rm (signed divide); 32-bit encoding only.
void Thumb2Assembler::sdiv(Register rd, Register rn, Register rm, Condition cond) {
  CheckCondition(cond);

  uint32_t op1 = 1U /* 0b001 */;
  uint32_t op2 = 15U /* 0b1111 */;
  int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 | B23 | B20 |
      op1 << 20 |
      op2 << 4 |
      0xf << 12 |  // Ra field is all-ones for divide.
      static_cast<uint32_t>(rd) << 8 |
      static_cast<uint32_t>(rn) << 16 |
      static_cast<uint32_t>(rm);

  Emit32(encoding);
}
657
658
// rd = rn / rm (unsigned divide); 32-bit encoding only. Differs from sdiv by B21.
void Thumb2Assembler::udiv(Register rd, Register rn, Register rm, Condition cond) {
  CheckCondition(cond);

  uint32_t op1 = 1U /* 0b001 */;
  uint32_t op2 = 15U /* 0b1111 */;
  int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 | B23 | B21 | B20 |
      op1 << 20 |
      op2 << 4 |
      0xf << 12 |  // Ra field is all-ones for divide.
      static_cast<uint32_t>(rd) << 8 |
      static_cast<uint32_t>(rn) << 16 |
      static_cast<uint32_t>(rm);

  Emit32(encoding);
}
674
675
// Signed bit-field extract: rd = sign-extended rn<lsb + width - 1 : lsb>.
// lsb is split into imm3 (bits 12-14) and imm2 (bits 6-7); widthminus1 in bits 0-4.
void Thumb2Assembler::sbfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond) {
  CheckCondition(cond);
  CHECK_LE(lsb, 31U);
  CHECK(1U <= width && width <= 32U) << width;
  uint32_t widthminus1 = width - 1;
  uint32_t imm2 = lsb & (B1 | B0);              // Bits 0-1 of `lsb`.
  uint32_t imm3 = (lsb & (B4 | B3 | B2)) >> 2;  // Bits 2-4 of `lsb`.

  uint32_t op = 20U /* 0b10100 */;
  int32_t encoding = B31 | B30 | B29 | B28 | B25 |
      op << 20 |
      static_cast<uint32_t>(rn) << 16 |
      imm3 << 12 |
      static_cast<uint32_t>(rd) << 8 |
      imm2 << 6 |
      widthminus1;

  Emit32(encoding);
}
695
696
// Unsigned bit-field extract: rd = zero-extended rn<lsb + width - 1 : lsb>.
// Identical field layout to sbfx; only the opcode differs.
void Thumb2Assembler::ubfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond) {
  CheckCondition(cond);
  CHECK_LE(lsb, 31U);
  CHECK(1U <= width && width <= 32U) << width;
  uint32_t widthminus1 = width - 1;
  uint32_t imm2 = lsb & (B1 | B0);              // Bits 0-1 of `lsb`.
  uint32_t imm3 = (lsb & (B4 | B3 | B2)) >> 2;  // Bits 2-4 of `lsb`.

  uint32_t op = 28U /* 0b11100 */;
  int32_t encoding = B31 | B30 | B29 | B28 | B25 |
      op << 20 |
      static_cast<uint32_t>(rn) << 16 |
      imm3 << 12 |
      static_cast<uint32_t>(rd) << 8 |
      imm2 << 6 |
      widthminus1;

  Emit32(encoding);
}
716
717
// Load word. Flags: (load=true, byte=false, half=false, signed=false).
void Thumb2Assembler::ldr(Register rd, const Address& ad, Condition cond) {
  EmitLoadStore(cond, true, false, false, false, rd, ad);
}
721
722
// Store word.
void Thumb2Assembler::str(Register rd, const Address& ad, Condition cond) {
  EmitLoadStore(cond, false, false, false, false, rd, ad);
}
726
727
// Load unsigned byte.
void Thumb2Assembler::ldrb(Register rd, const Address& ad, Condition cond) {
  EmitLoadStore(cond, true, true, false, false, rd, ad);
}
731
732
// Store byte.
void Thumb2Assembler::strb(Register rd, const Address& ad, Condition cond) {
  EmitLoadStore(cond, false, true, false, false, rd, ad);
}
736
737
// Load unsigned half-word.
void Thumb2Assembler::ldrh(Register rd, const Address& ad, Condition cond) {
  EmitLoadStore(cond, true, false, true, false, rd, ad);
}
741
742
// Store half-word.
void Thumb2Assembler::strh(Register rd, const Address& ad, Condition cond) {
  EmitLoadStore(cond, false, false, true, false, rd, ad);
}
746
747
// Load signed (sign-extended) byte.
void Thumb2Assembler::ldrsb(Register rd, const Address& ad, Condition cond) {
  EmitLoadStore(cond, true, true, false, true, rd, ad);
}
751
752
// Load signed (sign-extended) half-word.
void Thumb2Assembler::ldrsh(Register rd, const Address& ad, Condition cond) {
  EmitLoadStore(cond, true, false, true, true, rd, ad);
}
756
757
// Load double-word into the register pair rd, rd+1.
void Thumb2Assembler::ldrd(Register rd, const Address& ad, Condition cond) {
  ldrd(rd, Register(rd + 1), ad, cond);
}
761
762
// Load double-word into rd and rd2 (LDRD).
void Thumb2Assembler::ldrd(Register rd, Register rd2, const Address& ad, Condition cond) {
  CheckCondition(cond);
  // Encoding T1.
  // This is different from other loads. The encoding is like ARM.
  int32_t encoding = B31 | B30 | B29 | B27 | B22 | B20 |
      static_cast<int32_t>(rd) << 12 |
      static_cast<int32_t>(rd2) << 8 |
      ad.encodingThumbLdrdStrd();
  Emit32(encoding);
}
773
774
// Store double-word from the register pair rd, rd+1.
void Thumb2Assembler::strd(Register rd, const Address& ad, Condition cond) {
  strd(rd, Register(rd + 1), ad, cond);
}
778
779
// Store double-word from rd and rd2 (STRD). Differs from ldrd only by B20 (L bit).
void Thumb2Assembler::strd(Register rd, Register rd2, const Address& ad, Condition cond) {
  CheckCondition(cond);
  // Encoding T1.
  // This is different from other loads. The encoding is like ARM.
  int32_t encoding = B31 | B30 | B29 | B27 | B22 |
      static_cast<int32_t>(rd) << 12 |
      static_cast<int32_t>(rd2) << 8 |
      ad.encodingThumbLdrdStrd();
  Emit32(encoding);
}
790
791
// Load multiple registers. A single-register list is lowered to a plain ldr
// since the Thumb LDM encodings don't allow it.
void Thumb2Assembler::ldm(BlockAddressMode am,
                          Register base,
                          RegList regs,
                          Condition cond) {
  CHECK_NE(regs, 0u);  // Do not use ldm if there's nothing to load.
  if (IsPowerOfTwo(regs)) {
    // Thumb doesn't support one reg in the list.
    // Find the register number.
    int reg = CTZ(static_cast<uint32_t>(regs));
    CHECK_LT(reg, 16);
    CHECK(am == DB_W);  // Only writeback is supported.
    // NOTE(review): the fallback emits a post-indexed ldr (load from [base],
    // then base += 4); confirm this matches callers' expectation for DB_W.
    ldr(static_cast<Register>(reg), Address(base, kRegisterSize, Address::PostIndex), cond);
  } else {
    EmitMultiMemOp(cond, am, true, base, regs);
  }
}
808
809
// Store multiple registers. A single-register list is lowered to a plain str
// since the Thumb STM encodings don't allow it.
void Thumb2Assembler::stm(BlockAddressMode am,
                          Register base,
                          RegList regs,
                          Condition cond) {
  CHECK_NE(regs, 0u);  // Do not use stm if there's nothing to store.
  if (IsPowerOfTwo(regs)) {
    // Thumb doesn't support one reg in the list.
    // Find the register number.
    int reg = CTZ(static_cast<uint32_t>(regs));
    CHECK_LT(reg, 16);
    CHECK(am == IA || am == IA_W);
    // NOTE(review): the fallback stores at [base - 4], pre-indexed (writeback)
    // for IA, plain offset for IA_W; confirm this matches callers' expectation.
    Address::Mode strmode = am == IA ? Address::PreIndex : Address::Offset;
    str(static_cast<Register>(reg), Address(base, -kRegisterSize, strmode), cond);
  } else {
    EmitMultiMemOp(cond, am, false, base, regs);
  }
}
827
828
// vmovs (immediate): VMOV.F32 sd, #imm. Returns true and emits the
// instruction only when the float is representable as an 8-bit VFP
// immediate (low 19 mantissa bits zero and the tested exponent-bit pattern
// all-ones or all-zeros-with-top-set); returns false otherwise so the
// caller can materialize the value another way.
bool Thumb2Assembler::vmovs(SRegister sd, float s_imm, Condition cond) {
  uint32_t imm32 = bit_cast<uint32_t, float>(s_imm);
  if (((imm32 & ((1 << 19) - 1)) == 0) &&
      ((((imm32 >> 25) & ((1 << 6) - 1)) == (1 << 5)) ||
       (((imm32 >> 25) & ((1 << 6) - 1)) == ((1 << 5) -1)))) {
    // Pack sign, one exponent bit and the top mantissa bits into imm8.
    uint8_t imm8 = ((imm32 >> 31) << 7) | (((imm32 >> 29) & 1) << 6) |
        ((imm32 >> 19) & ((1 << 6) -1));
    EmitVFPsss(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | (imm8 & 0xf),
               sd, S0, S0);
    return true;
  }
  return false;
}
842
843
// vmovd (immediate): VMOV.F64 dd, #imm. Same scheme as the float overload:
// emits only when the double fits the 8-bit VFP immediate form (low 48
// mantissa bits zero plus the tested exponent-bit pattern); returns false
// otherwise.
bool Thumb2Assembler::vmovd(DRegister dd, double d_imm, Condition cond) {
  uint64_t imm64 = bit_cast<uint64_t, double>(d_imm);
  if (((imm64 & ((1LL << 48) - 1)) == 0) &&
      ((((imm64 >> 54) & ((1 << 9) - 1)) == (1 << 8)) ||
       (((imm64 >> 54) & ((1 << 9) - 1)) == ((1 << 8) -1)))) {
    // Pack sign, one exponent bit and the top mantissa bits into imm8.
    uint8_t imm8 = ((imm64 >> 63) << 7) | (((imm64 >> 61) & 1) << 6) |
        ((imm64 >> 48) & ((1 << 6) -1));
    EmitVFPddd(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | B8 | (imm8 & 0xf),
               dd, D0, D0);
    return true;
  }
  return false;
}
857
858
// vmovs: VMOV.F32 register-to-register copy (sd = sm).
void Thumb2Assembler::vmovs(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B6, sd, S0, sm);
}
862
863
// vmovd: VMOV.F64 register-to-register copy (dd = dm).
void Thumb2Assembler::vmovd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B6, dd, D0, dm);
}
867
868
// vadds: VADD.F32, single-precision add.
void Thumb2Assembler::vadds(SRegister sd, SRegister sn, SRegister sm,
                            Condition cond) {
  EmitVFPsss(cond, B21 | B20, sd, sn, sm);
}
873
874
// vaddd: VADD.F64, double-precision add.
void Thumb2Assembler::vaddd(DRegister dd, DRegister dn, DRegister dm,
                            Condition cond) {
  EmitVFPddd(cond, B21 | B20, dd, dn, dm);
}
879
880
// vsubs: VSUB.F32, single-precision subtract.
void Thumb2Assembler::vsubs(SRegister sd, SRegister sn, SRegister sm,
                            Condition cond) {
  EmitVFPsss(cond, B21 | B20 | B6, sd, sn, sm);
}
885
886
// vsubd: VSUB.F64, double-precision subtract.
void Thumb2Assembler::vsubd(DRegister dd, DRegister dn, DRegister dm,
                            Condition cond) {
  EmitVFPddd(cond, B21 | B20 | B6, dd, dn, dm);
}
891
892
// vmuls: VMUL.F32, single-precision multiply.
void Thumb2Assembler::vmuls(SRegister sd, SRegister sn, SRegister sm,
                            Condition cond) {
  EmitVFPsss(cond, B21, sd, sn, sm);
}
897
898
// vmuld: VMUL.F64, double-precision multiply.
void Thumb2Assembler::vmuld(DRegister dd, DRegister dn, DRegister dm,
                            Condition cond) {
  EmitVFPddd(cond, B21, dd, dn, dm);
}
903
904
// vmlas: VMLA.F32, single-precision multiply-accumulate.
void Thumb2Assembler::vmlas(SRegister sd, SRegister sn, SRegister sm,
                            Condition cond) {
  EmitVFPsss(cond, 0, sd, sn, sm);
}
909
910
// vmlad: VMLA.F64, double-precision multiply-accumulate.
void Thumb2Assembler::vmlad(DRegister dd, DRegister dn, DRegister dm,
                            Condition cond) {
  EmitVFPddd(cond, 0, dd, dn, dm);
}
915
916
// vmlss: VMLS.F32, single-precision multiply-subtract.
void Thumb2Assembler::vmlss(SRegister sd, SRegister sn, SRegister sm,
                            Condition cond) {
  EmitVFPsss(cond, B6, sd, sn, sm);
}
921
922
// vmlsd: VMLS.F64, double-precision multiply-subtract.
void Thumb2Assembler::vmlsd(DRegister dd, DRegister dn, DRegister dm,
                            Condition cond) {
  EmitVFPddd(cond, B6, dd, dn, dm);
}
927
928
// vdivs: VDIV.F32, single-precision divide.
void Thumb2Assembler::vdivs(SRegister sd, SRegister sn, SRegister sm,
                            Condition cond) {
  EmitVFPsss(cond, B23, sd, sn, sm);
}
933
934
// vdivd: VDIV.F64, double-precision divide.
void Thumb2Assembler::vdivd(DRegister dd, DRegister dn, DRegister dm,
                            Condition cond) {
  EmitVFPddd(cond, B23, dd, dn, dm);
}
939
940
// vabss: VABS.F32, single-precision absolute value.
void Thumb2Assembler::vabss(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B7 | B6, sd, S0, sm);
}
944
945
// vabsd: VABS.F64, double-precision absolute value.
void Thumb2Assembler::vabsd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B7 | B6, dd, D0, dm);
}
949
950
// vnegs: VNEG.F32, single-precision negate.
void Thumb2Assembler::vnegs(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B16 | B6, sd, S0, sm);
}
954
955
// vnegd: VNEG.F64, double-precision negate.
void Thumb2Assembler::vnegd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B16 | B6, dd, D0, dm);
}
959
960
// vsqrts: VSQRT.F32, single-precision square root.
void Thumb2Assembler::vsqrts(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B16 | B7 | B6, sd, S0, sm);
}
964
// vsqrtd: VSQRT.F64, double-precision square root.
void Thumb2Assembler::vsqrtd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B16 | B7 | B6, dd, D0, dm);
}
968
969
// vcvtsd: VCVT.F32.F64, convert double dm to single sd.
void Thumb2Assembler::vcvtsd(SRegister sd, DRegister dm, Condition cond) {
  EmitVFPsd(cond, B23 | B21 | B20 | B18 | B17 | B16 | B8 | B7 | B6, sd, dm);
}
973
974
// vcvtds: VCVT.F64.F32, convert single sm to double dd.
void Thumb2Assembler::vcvtds(DRegister dd, SRegister sm, Condition cond) {
  EmitVFPds(cond, B23 | B21 | B20 | B18 | B17 | B16 | B7 | B6, dd, sm);
}
978
979
// vcvtis: VCVT.S32.F32, convert single sm to signed int in sd.
void Thumb2Assembler::vcvtis(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B16 | B7 | B6, sd, S0, sm);
}
983
984
// vcvtid: VCVT.S32.F64, convert double dm to signed int in sd.
void Thumb2Assembler::vcvtid(SRegister sd, DRegister dm, Condition cond) {
  EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B16 | B8 | B7 | B6, sd, dm);
}
988
989
// vcvtsi: VCVT.F32.S32, convert signed int in sm to single sd.
void Thumb2Assembler::vcvtsi(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B7 | B6, sd, S0, sm);
}
993
994
// vcvtdi: VCVT.F64.S32, convert signed int in sm to double dd.
void Thumb2Assembler::vcvtdi(DRegister dd, SRegister sm, Condition cond) {
  EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B7 | B6, dd, sm);
}
998
999
// vcvtus: VCVT.U32.F32, convert single sm to unsigned int in sd.
void Thumb2Assembler::vcvtus(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B7 | B6, sd, S0, sm);
}
1003
1004
// vcvtud: VCVT.U32.F64, convert double dm to unsigned int in sd.
void Thumb2Assembler::vcvtud(SRegister sd, DRegister dm, Condition cond) {
  EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B8 | B7 | B6, sd, dm);
}
1008
1009
// vcvtsu: VCVT.F32.U32, convert unsigned int in sm to single sd.
void Thumb2Assembler::vcvtsu(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B6, sd, S0, sm);
}
1013
1014
// vcvtdu: VCVT.F64.U32, convert unsigned int in sm to double dd.
void Thumb2Assembler::vcvtdu(DRegister dd, SRegister sm, Condition cond) {
  EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B6, dd, sm);
}
1018
1019
// vcmps: VCMP.F32, compare singles sd and sm (sets VFP status flags).
void Thumb2Assembler::vcmps(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B18 | B6, sd, S0, sm);
}
1023
1024
// vcmpd: VCMP.F64, compare doubles dd and dm (sets VFP status flags).
void Thumb2Assembler::vcmpd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B18 | B6, dd, D0, dm);
}
1028
1029
// vcmpsz: VCMP.F32 of sd against #0.0.
void Thumb2Assembler::vcmpsz(SRegister sd, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B18 | B16 | B6, sd, S0, S0);
}
1033
1034
// vcmpdz: VCMP.F64 of dd against #0.0.
void Thumb2Assembler::vcmpdz(DRegister dd, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B18 | B16 | B6, dd, D0, D0);
}
1038
// b: emit a (possibly conditional) branch to label.
void Thumb2Assembler::b(Label* label, Condition cond) {
  // next_condition_ must be AL here — presumably guards against branching
  // with a pending IT-style condition; verify against CheckCondition users.
  DCHECK_EQ(next_condition_, AL);
  EmitBranch(cond, label, false, false);
}
1043
1044
// bl: emit a branch-with-link (call) to label.
void Thumb2Assembler::bl(Label* label, Condition cond) {
  CheckCondition(cond);
  EmitBranch(cond, label, true, false);  // link = true, x = false.
}
1049
1050
// blx: emit an unconditional branch-with-link-and-exchange to label.
void Thumb2Assembler::blx(Label* label) {
  EmitBranch(AL, label, true, true);  // link = true, x = true.
}
1054
1055
1056void Thumb2Assembler::MarkExceptionHandler(Label* label) {
1057 EmitDataProcessing(AL, TST, 1, PC, R0, ShifterOperand(0));
1058 Label l;
1059 b(&l);
1060 EmitBranch(AL, label, false, false);
1061 Bind(&l);
1062}
1063
1064
1065void Thumb2Assembler::Emit32(int32_t value) {
1066 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
1067 buffer_.Emit<int16_t>(value >> 16);
1068 buffer_.Emit<int16_t>(value & 0xffff);
1069}
1070
1071
// Append one 16-bit instruction halfword to the code buffer.
void Thumb2Assembler::Emit16(int16_t value) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  buffer_.Emit<int16_t>(value);
}
1076
1077
// Decides whether a data-processing instruction needs the 32-bit Thumb2
// encoding (returns true) or can use a 16-bit Thumb1 encoding (returns
// false). The 32-bit form is forced by force_32bit_ mode, high registers,
// shifted operands, and out-of-range immediates, as detailed below.
bool Thumb2Assembler::Is32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
                                            Opcode opcode,
                                            bool set_cc,
                                            Register rn,
                                            Register rd,
                                            const ShifterOperand& so) {
  if (force_32bit_) {
    return true;
  }

  // Check special case for SP relative ADD and SUB immediate.
  if ((opcode == ADD || opcode == SUB) && rn == SP && so.IsImmediate()) {
    // If the immediate is in range, use 16 bit.
    if (rd == SP) {
      if (so.GetImmediate() < (1 << 9)) {  // 9 bit immediate.
        return false;
      }
    } else if (!IsHighRegister(rd) && opcode == ADD) {
      if (so.GetImmediate() < (1 << 10)) {  // 10 bit immediate.
        return false;
      }
    }
  }

  // Only MOV, and ADD with rn == rd and no flag update, have 16-bit forms
  // that accept high registers.
  bool can_contain_high_register = (opcode == MOV)
      || ((opcode == ADD) && (rn == rd) && !set_cc);

  if (IsHighRegister(rd) || IsHighRegister(rn)) {
    if (!can_contain_high_register) {
      return true;
    }

    // There are high register instructions available for this opcode.
    // However, there is no actual shift available, neither for ADD nor for MOV (ASR/LSR/LSL/ROR).
    if (so.IsShift() && (so.GetShift() == RRX || so.GetImmediate() != 0u)) {
      return true;
    }

    // The ADD and MOV instructions that work with high registers don't have
    // 16-bit immediate variants.
    if (so.IsImmediate()) {
      return true;
    }
  }

  if (so.IsRegister() && IsHighRegister(so.GetRegister()) && !can_contain_high_register) {
    return true;
  }

  bool rn_is_valid = true;

  // Check for single operand instructions and ADD/SUB.
  switch (opcode) {
    case CMP:
    case MOV:
    case TST:
    case MVN:
      rn_is_valid = false;  // There is no Rn for these instructions.
      break;
    case TEQ:
      return true;  // TEQ has no 16-bit encoding at all.
    case ADD:
    case SUB:
      break;
    default:
      // Other two-operand opcodes are 16-bit only in the rd == rn form.
      if (so.IsRegister() && rd != rn) {
        return true;
      }
  }

  if (so.IsImmediate()) {
    if (rn_is_valid && rn != rd) {
      // The only thumb1 instruction with a register and an immediate are ADD and SUB.  The
      // immediate must be 3 bits.
      if (opcode != ADD && opcode != SUB) {
        return true;
      } else {
        // Check that the immediate is 3 bits for ADD and SUB.
        if (so.GetImmediate() >= 8) {
          return true;
        }
      }
    } else {
      // ADD, SUB, CMP and MOV may be thumb1 only if the immediate is 8 bits.
      if (!(opcode == ADD || opcode == SUB || opcode == MOV || opcode == CMP)) {
        return true;
      } else {
        if (so.GetImmediate() > 255) {
          return true;
        }
      }
    }
  }

  // Check for register shift operand.
  if (so.IsRegister() && so.IsShift()) {
    if (opcode != MOV) {
      return true;
    }
    // Check for MOV with an ROR: only a zero rotate has a 16-bit form.
    if (so.GetShift() == ROR) {
      if (so.GetImmediate() != 0) {
        return true;
      }
    }
  }

  // The instruction can be encoded in 16 bits.
  return false;
}
1188
1189
// Emits a 32-bit Thumb2 data-processing instruction. The 4-bit Thumb2
// opcode is derived from |opcode|: TST/TEQ/CMP/CMN are encoded as their
// parent ops with set_cc forced and rd = PC, while MOV/MVN use rn = PC.
void Thumb2Assembler::Emit32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
                                              Opcode opcode,
                                              bool set_cc,
                                              Register rn,
                                              Register rd,
                                              const ShifterOperand& so) {
  uint8_t thumb_opcode = 255U /* 0b11111111 */;  // Sentinel: "not mapped".
  switch (opcode) {
    case AND: thumb_opcode = 0U /* 0b0000 */; break;
    case EOR: thumb_opcode = 4U /* 0b0100 */; break;
    case SUB: thumb_opcode = 13U /* 0b1101 */; break;
    case RSB: thumb_opcode = 14U /* 0b1110 */; break;
    case ADD: thumb_opcode = 8U /* 0b1000 */; break;
    case ADC: thumb_opcode = 10U /* 0b1010 */; break;
    case SBC: thumb_opcode = 11U /* 0b1011 */; break;
    case RSC: break;  // RSC has no Thumb2 encoding; falls through to FATAL.
    case TST: thumb_opcode = 0U /* 0b0000 */; set_cc = true; rd = PC; break;
    case TEQ: thumb_opcode = 4U /* 0b0100 */; set_cc = true; rd = PC; break;
    case CMP: thumb_opcode = 13U /* 0b1101 */; set_cc = true; rd = PC; break;
    case CMN: thumb_opcode = 8U /* 0b1000 */; set_cc = true; rd = PC; break;
    case ORR: thumb_opcode = 2U /* 0b0010 */; break;
    case MOV: thumb_opcode = 2U /* 0b0010 */; rn = PC; break;
    case BIC: thumb_opcode = 1U /* 0b0001 */; break;
    case MVN: thumb_opcode = 3U /* 0b0011 */; rn = PC; break;
    default:
      break;
  }

  if (thumb_opcode == 255U /* 0b11111111 */) {
    LOG(FATAL) << "Invalid thumb2 opcode " << opcode;
    UNREACHABLE();
  }

  int32_t encoding = 0;
  if (so.IsImmediate()) {
    // Check special cases: plain 12-bit immediate forms of ADD/SUB
    // (ADDW/SUBW-style) when flags are not needed.
    if ((opcode == SUB || opcode == ADD) && (so.GetImmediate() < (1u << 12))) {
      if (!set_cc) {
        if (opcode == SUB) {
          thumb_opcode = 5U;
        } else if (opcode == ADD) {
          thumb_opcode = 0U;
        }
      }
      uint32_t imm = so.GetImmediate();

      // Split the 12-bit immediate into i:imm3:imm8 fields.
      uint32_t i = (imm >> 11) & 1;
      uint32_t imm3 = (imm >> 8) & 7U /* 0b111 */;
      uint32_t imm8 = imm & 0xff;

      encoding = B31 | B30 | B29 | B28 |
          (set_cc ? B20 : B25) |
          thumb_opcode << 21 |
          rn << 16 |
          rd << 8 |
          i << 26 |
          imm3 << 12 |
          imm8;
    } else {
      // Modified immediate.
      uint32_t imm = ModifiedImmediate(so.encodingThumb());
      if (imm == kInvalidModifiedImmediate) {
        LOG(FATAL) << "Immediate value cannot fit in thumb2 modified immediate";
        UNREACHABLE();
      }
      encoding = B31 | B30 | B29 | B28 |
          thumb_opcode << 21 |
          (set_cc ? B20 : 0) |
          rn << 16 |
          rd << 8 |
          imm;
    }
  } else if (so.IsRegister()) {
    // Register (possibly shifted).
    encoding = B31 | B30 | B29 | B27 | B25 |
        thumb_opcode << 21 |
        (set_cc ? B20 : 0) |
        rn << 16 |
        rd << 8 |
        so.encodingThumb();
  }
  Emit32(encoding);
}
1273
1274
// Emits a 16-bit (Thumb1) data-processing instruction. ADD/SUB are
// delegated to Emit16BitAddSub. Other opcodes are remapped onto the
// two-register Thumb1 forms; combinations with no 16-bit encoding hit
// LOG(FATAL).
void Thumb2Assembler::Emit16BitDataProcessing(Condition cond,
                                              Opcode opcode,
                                              bool set_cc,
                                              Register rn,
                                              Register rd,
                                              const ShifterOperand& so) {
  if (opcode == ADD || opcode == SUB) {
    Emit16BitAddSub(cond, opcode, set_cc, rn, rd, so);
    return;
  }
  uint8_t thumb_opcode = 255U /* 0b11111111 */;  // Sentinel: "not mapped".
  // Thumb1 encoding fields; defaults fit the data-processing register form.
  uint8_t dp_opcode = 1U /* 0b01 */;
  uint8_t opcode_shift = 6;
  uint8_t rd_shift = 0;
  uint8_t rn_shift = 3;
  uint8_t immediate_shift = 0;
  bool use_immediate = false;
  uint8_t immediate = 0;

  if (opcode == MOV && so.IsRegister() && so.IsShift()) {
    // Convert shifted mov operand2 into 16 bit opcodes (LSL/LSR/ASR/ROR).
    dp_opcode = 0;
    opcode_shift = 11;

    use_immediate = true;
    immediate = so.GetImmediate();
    immediate_shift = 6;

    rn = so.GetRegister();

    switch (so.GetShift()) {
    case LSL: thumb_opcode = 0U /* 0b00 */; break;
    case LSR: thumb_opcode = 1U /* 0b01 */; break;
    case ASR: thumb_opcode = 2U /* 0b10 */; break;
    case ROR:
      // ROR doesn't allow immediates.
      thumb_opcode = 7U /* 0b111 */;
      dp_opcode = 1U /* 0b01 */;
      opcode_shift = 6;
      use_immediate = false;
      break;
    case RRX: break;
    default:
      break;
    }
  } else {
    if (so.IsImmediate()) {
      use_immediate = true;
      immediate = so.GetImmediate();
    } else {
      CHECK(!(so.IsRegister() && so.IsShift() && so.GetSecondRegister() != kNoRegister))
          << "No register-shifted register instruction available in thumb";
      // Adjust rn and rd: only two registers will be emitted.
      switch (opcode) {
        case AND:
        case ORR:
        case EOR:
        case RSB:
        case ADC:
        case SBC:
        case BIC: {
          // Destructive two-register form: rd must also be an input.
          if (rn == rd) {
            rn = so.GetRegister();
          } else {
            CHECK_EQ(rd, so.GetRegister());
          }
          break;
        }
        case CMP:
        case CMN: {
          // Compares have no destination; operands move into rd/rn slots.
          CHECK_EQ(rd, 0);
          rd = rn;
          rn = so.GetRegister();
          break;
        }
        case TST:
        case TEQ:
        case MVN: {
          CHECK_EQ(rn, 0);
          rn = so.GetRegister();
          break;
        }
        default:
          break;
      }
    }

    switch (opcode) {
      case AND: thumb_opcode = 0U /* 0b0000 */; break;
      case ORR: thumb_opcode = 12U /* 0b1100 */; break;
      case EOR: thumb_opcode = 1U /* 0b0001 */; break;
      case RSB: thumb_opcode = 9U /* 0b1001 */; break;
      case ADC: thumb_opcode = 5U /* 0b0101 */; break;
      case SBC: thumb_opcode = 6U /* 0b0110 */; break;
      case BIC: thumb_opcode = 14U /* 0b1110 */; break;
      case TST: thumb_opcode = 8U /* 0b1000 */; CHECK(!use_immediate); break;
      case MVN: thumb_opcode = 15U /* 0b1111 */; CHECK(!use_immediate); break;
      case CMP: {
        if (use_immediate) {
          // T2 encoding.
          dp_opcode = 0;
          opcode_shift = 11;
          thumb_opcode = 5U /* 0b101 */;
          rd_shift = 8;
          rn_shift = 8;
        } else {
          thumb_opcode = 10U /* 0b1010 */;
        }

        break;
      }
      case CMN: {
        CHECK(!use_immediate);
        thumb_opcode = 11U /* 0b1011 */;
        break;
      }
      case MOV:
        dp_opcode = 0;
        if (use_immediate) {
          // T2 encoding.
          opcode_shift = 11;
          thumb_opcode = 4U /* 0b100 */;
          rd_shift = 8;
          rn_shift = 8;
        } else {
          rn = so.GetRegister();
          if (IsHighRegister(rn) || IsHighRegister(rd)) {
            // Special mov for high registers.
            dp_opcode = 1U /* 0b01 */;
            opcode_shift = 7;
            // Put the top bit of rd into the bottom bit of the opcode.
            thumb_opcode = 12U /* 0b0001100 */ | static_cast<uint32_t>(rd) >> 3;
            rd = static_cast<Register>(static_cast<uint32_t>(rd) & 7U /* 0b111 */);
          } else {
            thumb_opcode = 0;
          }
        }
        break;

      case TEQ:
      case RSC:
      default:
        LOG(FATAL) << "Invalid thumb1 opcode " << opcode;
        break;
    }
  }

  if (thumb_opcode == 255U /* 0b11111111 */) {
    LOG(FATAL) << "Invalid thumb1 opcode " << opcode;
    UNREACHABLE();
  }

  // Assemble the final 16-bit instruction from the selected fields.
  int16_t encoding = dp_opcode << 14 |
      (thumb_opcode << opcode_shift) |
      rd << rd_shift |
      rn << rn_shift |
      (use_immediate ? (immediate << immediate_shift) : 0);

  Emit16(encoding);
}
1436
1437
1438// ADD and SUB are complex enough to warrant their own emitter.
Ian Rogers6a3c1fc2014-10-31 00:33:20 -07001439void Thumb2Assembler::Emit16BitAddSub(Condition cond ATTRIBUTE_UNUSED,
Dave Allison65fcc2c2014-04-28 13:45:27 -07001440 Opcode opcode,
Andreas Gampe513ea0c2015-02-02 13:17:52 -08001441 bool set_cc,
Dave Allison65fcc2c2014-04-28 13:45:27 -07001442 Register rn,
1443 Register rd,
1444 const ShifterOperand& so) {
1445 uint8_t dp_opcode = 0;
1446 uint8_t opcode_shift = 6;
1447 uint8_t rd_shift = 0;
1448 uint8_t rn_shift = 3;
1449 uint8_t immediate_shift = 0;
1450 bool use_immediate = false;
Vladimir Markoac0341e2014-12-18 19:56:49 +00001451 uint32_t immediate = 0; // Should be at most 9 bits but keep the full immediate for CHECKs.
Dave Allison65fcc2c2014-04-28 13:45:27 -07001452 uint8_t thumb_opcode;;
1453
1454 if (so.IsImmediate()) {
1455 use_immediate = true;
1456 immediate = so.GetImmediate();
1457 }
1458
1459 switch (opcode) {
1460 case ADD:
1461 if (so.IsRegister()) {
1462 Register rm = so.GetRegister();
Andreas Gampe513ea0c2015-02-02 13:17:52 -08001463 if (rn == rd && !set_cc) {
Dave Allison65fcc2c2014-04-28 13:45:27 -07001464 // Can use T2 encoding (allows 4 bit registers)
Andreas Gampec8ccf682014-09-29 20:07:43 -07001465 dp_opcode = 1U /* 0b01 */;
Dave Allison65fcc2c2014-04-28 13:45:27 -07001466 opcode_shift = 10;
Andreas Gampec8ccf682014-09-29 20:07:43 -07001467 thumb_opcode = 1U /* 0b0001 */;
Dave Allison65fcc2c2014-04-28 13:45:27 -07001468 // Make Rn also contain the top bit of rd.
1469 rn = static_cast<Register>(static_cast<uint32_t>(rm) |
Andreas Gampec8ccf682014-09-29 20:07:43 -07001470 (static_cast<uint32_t>(rd) & 8U /* 0b1000 */) << 1);
1471 rd = static_cast<Register>(static_cast<uint32_t>(rd) & 7U /* 0b111 */);
Dave Allison65fcc2c2014-04-28 13:45:27 -07001472 } else {
1473 // T1.
1474 opcode_shift = 9;
Andreas Gampec8ccf682014-09-29 20:07:43 -07001475 thumb_opcode = 12U /* 0b01100 */;
Dave Allison65fcc2c2014-04-28 13:45:27 -07001476 immediate = static_cast<uint32_t>(so.GetRegister());
1477 use_immediate = true;
1478 immediate_shift = 6;
1479 }
1480 } else {
1481 // Immediate.
1482 if (rd == SP && rn == SP) {
1483 // ADD sp, sp, #imm
Andreas Gampec8ccf682014-09-29 20:07:43 -07001484 dp_opcode = 2U /* 0b10 */;
1485 thumb_opcode = 3U /* 0b11 */;
Dave Allison65fcc2c2014-04-28 13:45:27 -07001486 opcode_shift = 12;
Vladimir Markoac0341e2014-12-18 19:56:49 +00001487 CHECK_LT(immediate, (1u << 9));
Roland Levillain14d90572015-07-16 10:52:26 +01001488 CHECK_ALIGNED(immediate, 4);
Dave Allison65fcc2c2014-04-28 13:45:27 -07001489
1490 // Remove rd and rn from instruction by orring it with immed and clearing bits.
1491 rn = R0;
1492 rd = R0;
1493 rd_shift = 0;
1494 rn_shift = 0;
1495 immediate >>= 2;
1496 } else if (rd != SP && rn == SP) {
1497 // ADD rd, SP, #imm
Andreas Gampec8ccf682014-09-29 20:07:43 -07001498 dp_opcode = 2U /* 0b10 */;
1499 thumb_opcode = 5U /* 0b101 */;
Dave Allison65fcc2c2014-04-28 13:45:27 -07001500 opcode_shift = 11;
Vladimir Markoac0341e2014-12-18 19:56:49 +00001501 CHECK_LT(immediate, (1u << 10));
Roland Levillain14d90572015-07-16 10:52:26 +01001502 CHECK_ALIGNED(immediate, 4);
Dave Allison65fcc2c2014-04-28 13:45:27 -07001503
1504 // Remove rn from instruction.
1505 rn = R0;
1506 rn_shift = 0;
1507 rd_shift = 8;
1508 immediate >>= 2;
1509 } else if (rn != rd) {
1510 // Must use T1.
1511 opcode_shift = 9;
Andreas Gampec8ccf682014-09-29 20:07:43 -07001512 thumb_opcode = 14U /* 0b01110 */;
Dave Allison65fcc2c2014-04-28 13:45:27 -07001513 immediate_shift = 6;
1514 } else {
1515 // T2 encoding.
1516 opcode_shift = 11;
Andreas Gampec8ccf682014-09-29 20:07:43 -07001517 thumb_opcode = 6U /* 0b110 */;
Dave Allison65fcc2c2014-04-28 13:45:27 -07001518 rd_shift = 8;
1519 rn_shift = 8;
1520 }
1521 }
1522 break;
1523
1524 case SUB:
1525 if (so.IsRegister()) {
1526 // T1.
1527 opcode_shift = 9;
Andreas Gampec8ccf682014-09-29 20:07:43 -07001528 thumb_opcode = 13U /* 0b01101 */;
Dave Allison65fcc2c2014-04-28 13:45:27 -07001529 immediate = static_cast<uint32_t>(so.GetRegister());
1530 use_immediate = true;
1531 immediate_shift = 6;
1532 } else {
1533 if (rd == SP && rn == SP) {
1534 // SUB sp, sp, #imm
Andreas Gampec8ccf682014-09-29 20:07:43 -07001535 dp_opcode = 2U /* 0b10 */;
1536 thumb_opcode = 0x61 /* 0b1100001 */;
Dave Allison65fcc2c2014-04-28 13:45:27 -07001537 opcode_shift = 7;
Vladimir Markoac0341e2014-12-18 19:56:49 +00001538 CHECK_LT(immediate, (1u << 9));
Roland Levillain14d90572015-07-16 10:52:26 +01001539 CHECK_ALIGNED(immediate, 4);
Dave Allison65fcc2c2014-04-28 13:45:27 -07001540
1541 // Remove rd and rn from instruction by orring it with immed and clearing bits.
1542 rn = R0;
1543 rd = R0;
1544 rd_shift = 0;
1545 rn_shift = 0;
1546 immediate >>= 2;
1547 } else if (rn != rd) {
1548 // Must use T1.
1549 opcode_shift = 9;
Andreas Gampec8ccf682014-09-29 20:07:43 -07001550 thumb_opcode = 15U /* 0b01111 */;
Dave Allison65fcc2c2014-04-28 13:45:27 -07001551 immediate_shift = 6;
1552 } else {
1553 // T2 encoding.
1554 opcode_shift = 11;
Andreas Gampec8ccf682014-09-29 20:07:43 -07001555 thumb_opcode = 7U /* 0b111 */;
Dave Allison65fcc2c2014-04-28 13:45:27 -07001556 rd_shift = 8;
1557 rn_shift = 8;
1558 }
1559 }
1560 break;
1561 default:
1562 LOG(FATAL) << "This opcode is not an ADD or SUB: " << opcode;
Vladimir Markoe8469c12014-11-26 18:09:30 +00001563 UNREACHABLE();
Dave Allison65fcc2c2014-04-28 13:45:27 -07001564 }
1565
1566 int16_t encoding = dp_opcode << 14 |
1567 (thumb_opcode << opcode_shift) |
1568 rd << rd_shift |
1569 rn << rn_shift |
1570 (use_immediate ? (immediate << immediate_shift) : 0);
1571
1572 Emit16(encoding);
1573}
1574
1575
1576void Thumb2Assembler::EmitDataProcessing(Condition cond,
1577 Opcode opcode,
Ian Rogers6a3c1fc2014-10-31 00:33:20 -07001578 bool set_cc,
Dave Allison65fcc2c2014-04-28 13:45:27 -07001579 Register rn,
1580 Register rd,
1581 const ShifterOperand& so) {
1582 CHECK_NE(rd, kNoRegister);
1583 CheckCondition(cond);
1584
1585 if (Is32BitDataProcessing(cond, opcode, set_cc, rn, rd, so)) {
1586 Emit32BitDataProcessing(cond, opcode, set_cc, rn, rd, so);
1587 } else {
1588 Emit16BitDataProcessing(cond, opcode, set_cc, rn, rd, so);
1589 }
1590}
1591
// Emits a shift of |rm| by constant |amount| into |rd|. High registers,
// ROR and RRX require the 32-bit encoding; otherwise a 16-bit encoding is
// used.
void Thumb2Assembler::EmitShift(Register rd, Register rm, Shift shift, uint8_t amount, bool setcc) {
  CHECK_LT(amount, (1 << 5));
  if (IsHighRegister(rd) || IsHighRegister(rm) || shift == ROR || shift == RRX) {
    uint16_t opcode = 0;
    switch (shift) {
      case LSL: opcode = 0U /* 0b00 */; break;
      case LSR: opcode = 1U /* 0b01 */; break;
      case ASR: opcode = 2U /* 0b10 */; break;
      case ROR: opcode = 3U /* 0b11 */; break;
      case RRX: opcode = 3U /* 0b11 */; amount = 0; break;  // RRX is ROR with amount 0.
      default:
        LOG(FATAL) << "Unsupported thumb2 shift opcode";
        UNREACHABLE();
    }
    // 32 bit.
    int32_t encoding = B31 | B30 | B29 | B27 | B25 | B22 |
        0xf << 16 | (setcc ? B20 : 0);
    // The 5-bit shift amount splits into imm3:imm2 fields.
    uint32_t imm3 = amount >> 2;
    uint32_t imm2 = amount & 3U /* 0b11 */;
    encoding |= imm3 << 12 | imm2 << 6 | static_cast<int16_t>(rm) |
        static_cast<int16_t>(rd) << 8 | opcode << 4;
    Emit32(encoding);
  } else {
    // 16 bit shift.
    uint16_t opcode = 0;
    switch (shift) {
      case LSL: opcode = 0U /* 0b00 */; break;
      case LSR: opcode = 1U /* 0b01 */; break;
      case ASR: opcode = 2U /* 0b10 */; break;
      default:
        LOG(FATAL) << "Unsupported thumb2 shift opcode";
        UNREACHABLE();
    }
    int16_t encoding = opcode << 11 | amount << 6 | static_cast<int16_t>(rm) << 3 |
        static_cast<int16_t>(rd);
    Emit16(encoding);
  }
}
1630
// Emits a shift of |rn| by the amount held in register |rm| into |rd|.
// The 16-bit form exists only for low registers with rd == rn; anything
// else uses the 32-bit encoding. RRX cannot take a register amount.
void Thumb2Assembler::EmitShift(Register rd, Register rn, Shift shift, Register rm, bool setcc) {
  CHECK_NE(shift, RRX);
  bool must_be_32bit = false;
  if (IsHighRegister(rd) || IsHighRegister(rm) || IsHighRegister(rn) || rd != rn) {
    must_be_32bit = true;
  }

  if (must_be_32bit) {
    uint16_t opcode = 0;
     switch (shift) {
      case LSL: opcode = 0U /* 0b00 */; break;
      case LSR: opcode = 1U /* 0b01 */; break;
      case ASR: opcode = 2U /* 0b10 */; break;
      case ROR: opcode = 3U /* 0b11 */; break;
      default:
        LOG(FATAL) << "Unsupported thumb2 shift opcode";
        UNREACHABLE();
    }
    // 32 bit.
    int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 |
        0xf << 12 | (setcc ? B20 : 0);
    encoding |= static_cast<int16_t>(rn) << 16 | static_cast<int16_t>(rm) |
        static_cast<int16_t>(rd) << 8 | opcode << 21;
    Emit32(encoding);
  } else {
    uint16_t opcode = 0;
    switch (shift) {
      case LSL: opcode = 2U /* 0b0010 */; break;
      case LSR: opcode = 3U /* 0b0011 */; break;
      case ASR: opcode = 4U /* 0b0100 */; break;
      default:
        LOG(FATAL) << "Unsupported thumb2 shift opcode";
        UNREACHABLE();
    }
    int16_t encoding = B14 | opcode << 6 | static_cast<int16_t>(rm) << 3 |
        static_cast<int16_t>(rd);
    Emit16(encoding);
  }
}
1670
Vladimir Markocf93a5c2015-06-16 11:33:24 +00001671inline size_t Thumb2Assembler::Fixup::SizeInBytes(Size size) {
1672 switch (size) {
1673 case kBranch16Bit:
1674 return 2u;
1675 case kBranch32Bit:
1676 return 4u;
Dave Allison45fdb932014-06-25 12:37:10 -07001677
Vladimir Markocf93a5c2015-06-16 11:33:24 +00001678 case kCbxz16Bit:
1679 return 2u;
1680 case kCbxz32Bit:
1681 return 4u;
1682 case kCbxz48Bit:
1683 return 6u;
Dave Allison65fcc2c2014-04-28 13:45:27 -07001684
Vladimir Markocf93a5c2015-06-16 11:33:24 +00001685 case kLiteral1KiB:
1686 return 2u;
1687 case kLiteral4KiB:
1688 return 4u;
1689 case kLiteral64KiB:
1690 return 8u;
1691 case kLiteral1MiB:
1692 return 10u;
1693 case kLiteralFar:
1694 return 14u;
Dave Allison65fcc2c2014-04-28 13:45:27 -07001695
Vladimir Markocf93a5c2015-06-16 11:33:24 +00001696 case kLongOrFPLiteral1KiB:
1697 return 4u;
1698 case kLongOrFPLiteral256KiB:
1699 return 10u;
1700 case kLongOrFPLiteralFar:
1701 return 14u;
1702 }
1703 LOG(FATAL) << "Unexpected size: " << static_cast<int>(size);
1704 UNREACHABLE();
1705}
1706
// Returns the byte size of the encoding this fixup had when first emitted,
// before any size upgrades by AdjustSizeIfNeeded().
inline uint32_t Thumb2Assembler::Fixup::GetOriginalSizeInBytes() const {
  return SizeInBytes(original_size_);
}
1710
// Returns the byte size of the fixup's current (possibly upgraded) encoding.
inline uint32_t Thumb2Assembler::Fixup::GetSizeInBytes() const {
  return SizeInBytes(size_);
}
1714
1715inline size_t Thumb2Assembler::Fixup::LiteralPoolPaddingSize(uint32_t current_code_size) {
1716 // The code size must be a multiple of 2.
Roland Levillain14d90572015-07-16 10:52:26 +01001717 DCHECK_ALIGNED(current_code_size, 2);
Vladimir Markocf93a5c2015-06-16 11:33:24 +00001718 // If it isn't a multiple of 4, we need to add a 2-byte padding before the literal pool.
1719 return current_code_size & 2;
1720}
1721
// Computes the operand offset for this fixup (the value to encode in the
// branch/load instruction) relative to the default Thumb2 PC, which reads as
// the instruction address + 4. `current_code_size` is the total code size
// including all size adjustments so far; it determines whether the literal
// pool is preceded by alignment padding.
inline int32_t Thumb2Assembler::Fixup::GetOffset(uint32_t current_code_size) const {
  static constexpr int32_t int32_min = std::numeric_limits<int32_t>::min();
  static constexpr int32_t int32_max = std::numeric_limits<int32_t>::max();
  DCHECK_LE(target_, static_cast<uint32_t>(int32_max));
  DCHECK_LE(location_, static_cast<uint32_t>(int32_max));
  DCHECK_LE(adjustment_, static_cast<uint32_t>(int32_max));
  // Raw distance between the fixup location and its target.
  int32_t diff = static_cast<int32_t>(target_) - static_cast<int32_t>(location_);
  if (target_ > location_) {
    // Forward reference: code growth accumulated in adjustment_ pushes the
    // target further away.
    DCHECK_LE(adjustment_, static_cast<uint32_t>(int32_max - diff));
    diff += static_cast<int32_t>(adjustment_);
  } else {
    // Backward reference: growth between the target and this location makes
    // the (negative) distance larger in magnitude.
    DCHECK_LE(int32_min + static_cast<int32_t>(adjustment_), diff);
    diff -= static_cast<int32_t>(adjustment_);
  }
  // The default PC adjustment for Thumb2 is 4 bytes.
  DCHECK_GE(diff, int32_min + 4);
  diff -= 4;
  // Add additional adjustment for instructions preceding the PC usage, padding
  // before the literal pool and rounding down the PC for literal loads.
  switch (GetSize()) {
    case kBranch16Bit:
    case kBranch32Bit:
      break;

    case kCbxz16Bit:
      break;
    case kCbxz32Bit:
    case kCbxz48Bit:
      DCHECK_GE(diff, int32_min + 2);
      diff -= 2;        // Extra CMP Rn, #0, 16-bit.
      break;

    case kLiteral1KiB:
    case kLiteral4KiB:
    case kLongOrFPLiteral1KiB:
      // Literal loads reference the pool, which may be preceded by 2 bytes of padding.
      DCHECK(diff >= 0 || (GetSize() == kLiteral1KiB && diff == -2));
      diff += LiteralPoolPaddingSize(current_code_size);
      // Load literal instructions round down the PC+4 to a multiple of 4, so if the PC
      // isn't a multiple of 2, we need to adjust. Since we already adjusted for the target
      // being aligned, current PC alignment can be inferred from diff.
      DCHECK_ALIGNED(diff, 2);
      diff = diff + (diff & 2);
      DCHECK_GE(diff, 0);
      break;
    case kLiteral1MiB:
    case kLiteral64KiB:
    case kLongOrFPLiteral256KiB:
      DCHECK_GE(diff, 4);  // The target must be at least 4 bytes after the ADD rX, PC.
      diff -= 4;        // One extra 32-bit MOV.
      diff += LiteralPoolPaddingSize(current_code_size);
      break;
    case kLiteralFar:
    case kLongOrFPLiteralFar:
      DCHECK_GE(diff, 8);  // The target must be at least 4 bytes after the ADD rX, PC.
      diff -= 8;        // Extra MOVW+MOVT; both 32-bit.
      diff += LiteralPoolPaddingSize(current_code_size);
      break;
  }
  return diff;
}
1782
1783inline size_t Thumb2Assembler::Fixup::IncreaseSize(Size new_size) {
1784 DCHECK_NE(target_, kUnresolved);
1785 Size old_size = size_;
1786 size_ = new_size;
1787 DCHECK_GT(SizeInBytes(new_size), SizeInBytes(old_size));
1788 size_t adjustment = SizeInBytes(new_size) - SizeInBytes(old_size);
1789 if (target_ > location_) {
1790 adjustment_ += adjustment;
1791 }
1792 return adjustment;
1793}
1794
// Checks whether this fixup's current encoding can reach its target given
// `current_code_size` and, if not, upgrades to progressively larger encodings
// (falling through to re-check each one). Returns the number of bytes by
// which the code grew.
uint32_t Thumb2Assembler::Fixup::AdjustSizeIfNeeded(uint32_t current_code_size) {
  uint32_t old_code_size = current_code_size;
  switch (GetSize()) {
    case kBranch16Bit:
      // 16-bit B<cond> takes a 9-bit signed offset; unconditional B takes 12 bits.
      if (IsInt(cond_ != AL ? 9 : 12, GetOffset(current_code_size))) {
        break;
      }
      current_code_size += IncreaseSize(kBranch32Bit);
      FALLTHROUGH_INTENDED;
    case kBranch32Bit:
      // We don't support conditional branches beyond +-1MiB
      // or unconditional branches beyond +-16MiB.
      break;

    case kCbxz16Bit:
      // CBZ/CBNZ takes a 7-bit unsigned (forward-only) offset.
      if (IsUint<7>(GetOffset(current_code_size))) {
        break;
      }
      current_code_size += IncreaseSize(kCbxz32Bit);
      FALLTHROUGH_INTENDED;
    case kCbxz32Bit:
      // CMP + 16-bit B<cond>: 9-bit signed offset.
      if (IsInt<9>(GetOffset(current_code_size))) {
        break;
      }
      current_code_size += IncreaseSize(kCbxz48Bit);
      FALLTHROUGH_INTENDED;
    case kCbxz48Bit:
      // We don't support conditional branches beyond +-1MiB.
      break;

    case kLiteral1KiB:
      // 16-bit LDR literal: 10-bit unsigned offset, low registers only.
      DCHECK(!IsHighRegister(rn_));
      if (IsUint<10>(GetOffset(current_code_size))) {
        break;
      }
      current_code_size += IncreaseSize(kLiteral4KiB);
      FALLTHROUGH_INTENDED;
    case kLiteral4KiB:
      // 32-bit LDR literal: 12-bit unsigned offset.
      if (IsUint<12>(GetOffset(current_code_size))) {
        break;
      }
      current_code_size += IncreaseSize(kLiteral64KiB);
      FALLTHROUGH_INTENDED;
    case kLiteral64KiB:
      // Can't handle high register which we can encounter by fall-through from kLiteral4KiB.
      if (!IsHighRegister(rn_) && IsUint<16>(GetOffset(current_code_size))) {
        break;
      }
      current_code_size += IncreaseSize(kLiteral1MiB);
      FALLTHROUGH_INTENDED;
    case kLiteral1MiB:
      if (IsUint<20>(GetOffset(current_code_size))) {
        break;
      }
      current_code_size += IncreaseSize(kLiteralFar);
      FALLTHROUGH_INTENDED;
    case kLiteralFar:
      // This encoding can reach any target.
      break;

    case kLongOrFPLiteral1KiB:
      if (IsUint<10>(GetOffset(current_code_size))) {
        break;
      }
      current_code_size += IncreaseSize(kLongOrFPLiteral256KiB);
      FALLTHROUGH_INTENDED;
    case kLongOrFPLiteral256KiB:
      if (IsUint<18>(GetOffset(current_code_size))) {
        break;
      }
      current_code_size += IncreaseSize(kLongOrFPLiteralFar);
      FALLTHROUGH_INTENDED;
    case kLongOrFPLiteralFar:
      // This encoding can reach any target.
      break;
  }
  return current_code_size - old_code_size;
}
1873
// Writes the final instruction bytes for this fixup into `buffer` at
// location_, using the encoding variant selected by earlier size adjustment.
// `code_size` is the final code size, needed to compute the encoded offset.
// Multi-instruction variants store consecutive 16-bit halfwords.
void Thumb2Assembler::Fixup::Emit(AssemblerBuffer* buffer, uint32_t code_size) const {
  switch (GetSize()) {
    case kBranch16Bit: {
      // Single 16-bit B / B<cond>.
      DCHECK(type_ == kUnconditional || type_ == kConditional);
      DCHECK_EQ(type_ == kConditional, cond_ != AL);
      int16_t encoding = BEncoding16(GetOffset(code_size), cond_);
      buffer->Store<int16_t>(location_, encoding);
      break;
    }
    case kBranch32Bit: {
      // 32-bit B / B<cond> / BL / BLX; BL and BLX are derived from the B encoding.
      DCHECK(type_ == kConditional || type_ == kUnconditional ||
             type_ == kUnconditionalLink || type_ == kUnconditionalLinkX);
      DCHECK_EQ(type_ == kConditional, cond_ != AL);
      int32_t encoding = BEncoding32(GetOffset(code_size), cond_);
      if (type_ == kUnconditionalLink) {
        DCHECK_NE(encoding & B12, 0);
        encoding |= B14;
      } else if (type_ == kUnconditionalLinkX) {
        DCHECK_NE(encoding & B12, 0);
        encoding ^= B14 | B12;
      }
      buffer->Store<int16_t>(location_, encoding >> 16);
      buffer->Store<int16_t>(location_ + 2u, static_cast<int16_t>(encoding & 0xffff));
      break;
    }

    case kCbxz16Bit: {
      // Single 16-bit CBZ/CBNZ.
      DCHECK(type_ == kCompareAndBranchXZero);
      int16_t encoding = CbxzEncoding16(rn_, GetOffset(code_size), cond_);
      buffer->Store<int16_t>(location_, encoding);
      break;
    }
    case kCbxz32Bit: {
      // CBZ/CBNZ out of range: expand to CMP Rn, #0 + 16-bit B<cond>.
      DCHECK(type_ == kCompareAndBranchXZero);
      DCHECK(cond_ == EQ || cond_ == NE);
      int16_t cmp_encoding = CmpRnImm8Encoding16(rn_, 0);
      int16_t b_encoding = BEncoding16(GetOffset(code_size), cond_);
      buffer->Store<int16_t>(location_, cmp_encoding);
      buffer->Store<int16_t>(location_ + 2, b_encoding);
      break;
    }
    case kCbxz48Bit: {
      // CMP Rn, #0 + 32-bit B<cond>.
      DCHECK(type_ == kCompareAndBranchXZero);
      DCHECK(cond_ == EQ || cond_ == NE);
      int16_t cmp_encoding = CmpRnImm8Encoding16(rn_, 0);
      int32_t b_encoding = BEncoding32(GetOffset(code_size), cond_);
      buffer->Store<int16_t>(location_, cmp_encoding);
      buffer->Store<int16_t>(location_ + 2u, b_encoding >> 16);
      buffer->Store<int16_t>(location_ + 4u, static_cast<int16_t>(b_encoding & 0xffff));
      break;
    }

    case kLiteral1KiB: {
      // 16-bit LDR literal.
      DCHECK(type_ == kLoadLiteralNarrow);
      int16_t encoding = LdrLitEncoding16(rn_, GetOffset(code_size));
      buffer->Store<int16_t>(location_, encoding);
      break;
    }
    case kLiteral4KiB: {
      // 32-bit LDR literal.
      DCHECK(type_ == kLoadLiteralNarrow);
      // GetOffset() uses PC+4 but load literal uses AlignDown(PC+4, 4). Adjust offset accordingly.
      int32_t encoding = LdrLitEncoding32(rn_, GetOffset(code_size));
      buffer->Store<int16_t>(location_, encoding >> 16);
      buffer->Store<int16_t>(location_ + 2u, static_cast<int16_t>(encoding & 0xffff));
      break;
    }
    case kLiteral64KiB: {
      // MOVW rn, #offset; ADD rn, PC; LDR rn, [rn, #0].
      DCHECK(type_ == kLoadLiteralNarrow);
      int32_t mov_encoding = MovwEncoding32(rn_, GetOffset(code_size));
      int16_t add_pc_encoding = AddRdnRmEncoding16(rn_, PC);
      int16_t ldr_encoding = LdrRtRnImm5Encoding16(rn_, rn_, 0);
      buffer->Store<int16_t>(location_, mov_encoding >> 16);
      buffer->Store<int16_t>(location_ + 2u, static_cast<int16_t>(mov_encoding & 0xffff));
      buffer->Store<int16_t>(location_ + 4u, add_pc_encoding);
      buffer->Store<int16_t>(location_ + 6u, ldr_encoding);
      break;
    }
    case kLiteral1MiB: {
      // MOV rn, #(offset & ~0xfff); ADD rn, PC; LDR rn, [rn, #(offset & 0xfff)].
      DCHECK(type_ == kLoadLiteralNarrow);
      int32_t offset = GetOffset(code_size);
      int32_t mov_encoding = MovModImmEncoding32(rn_, offset & ~0xfff);
      int16_t add_pc_encoding = AddRdnRmEncoding16(rn_, PC);
      int32_t ldr_encoding = LdrRtRnImm12Encoding(rn_, rn_, offset & 0xfff);
      buffer->Store<int16_t>(location_, mov_encoding >> 16);
      buffer->Store<int16_t>(location_ + 2u, static_cast<int16_t>(mov_encoding & 0xffff));
      buffer->Store<int16_t>(location_ + 4u, add_pc_encoding);
      buffer->Store<int16_t>(location_ + 6u, ldr_encoding >> 16);
      buffer->Store<int16_t>(location_ + 8u, static_cast<int16_t>(ldr_encoding & 0xffff));
      break;
    }
    case kLiteralFar: {
      // MOVW+MOVT the full offset; ADD rn, PC; LDR rn, [rn, #0].
      DCHECK(type_ == kLoadLiteralNarrow);
      int32_t offset = GetOffset(code_size);
      int32_t movw_encoding = MovwEncoding32(rn_, offset & 0xffff);
      int32_t movt_encoding = MovtEncoding32(rn_, offset & ~0xffff);
      int16_t add_pc_encoding = AddRdnRmEncoding16(rn_, PC);
      int32_t ldr_encoding = LdrRtRnImm12Encoding(rn_, rn_, 0);
      buffer->Store<int16_t>(location_, movw_encoding >> 16);
      buffer->Store<int16_t>(location_ + 2u, static_cast<int16_t>(movw_encoding & 0xffff));
      buffer->Store<int16_t>(location_ + 4u, movt_encoding >> 16);
      buffer->Store<int16_t>(location_ + 6u, static_cast<int16_t>(movt_encoding & 0xffff));
      buffer->Store<int16_t>(location_ + 8u, add_pc_encoding);
      buffer->Store<int16_t>(location_ + 10u, ldr_encoding >> 16);
      buffer->Store<int16_t>(location_ + 12u, static_cast<int16_t>(ldr_encoding & 0xffff));
      break;
    }

    case kLongOrFPLiteral1KiB: {
      // Single 32-bit wide/FP load from PC.
      int32_t encoding = LoadWideOrFpEncoding(PC, GetOffset(code_size));  // DCHECKs type_.
      buffer->Store<int16_t>(location_, encoding >> 16);
      buffer->Store<int16_t>(location_ + 2u, static_cast<int16_t>(encoding & 0xffff));
      break;
    }
    case kLongOrFPLiteral256KiB: {
      // Materialize the high offset bits in IP, then load via IP.
      int32_t offset = GetOffset(code_size);
      int32_t mov_encoding = MovModImmEncoding32(IP, offset & ~0x3ff);
      int16_t add_pc_encoding = AddRdnRmEncoding16(IP, PC);
      int32_t ldr_encoding = LoadWideOrFpEncoding(IP, offset & 0x3ff);    // DCHECKs type_.
      buffer->Store<int16_t>(location_, mov_encoding >> 16);
      buffer->Store<int16_t>(location_ + 2u, static_cast<int16_t>(mov_encoding & 0xffff));
      buffer->Store<int16_t>(location_ + 4u, add_pc_encoding);
      buffer->Store<int16_t>(location_ + 6u, ldr_encoding >> 16);
      buffer->Store<int16_t>(location_ + 8u, static_cast<int16_t>(ldr_encoding & 0xffff));
      break;
    }
    case kLongOrFPLiteralFar: {
      // MOVW+MOVT the full offset into IP, add PC, then load via IP.
      int32_t offset = GetOffset(code_size);
      int32_t movw_encoding = MovwEncoding32(IP, offset & 0xffff);
      int32_t movt_encoding = MovtEncoding32(IP, offset & ~0xffff);
      int16_t add_pc_encoding = AddRdnRmEncoding16(IP, PC);
      int32_t ldr_encoding = LoadWideOrFpEncoding(IP, 0);                 // DCHECKs type_.
      buffer->Store<int16_t>(location_, movw_encoding >> 16);
      buffer->Store<int16_t>(location_ + 2u, static_cast<int16_t>(movw_encoding & 0xffff));
      buffer->Store<int16_t>(location_ + 4u, movt_encoding >> 16);
      buffer->Store<int16_t>(location_ + 6u, static_cast<int16_t>(movt_encoding & 0xffff));
      buffer->Store<int16_t>(location_ + 8u, add_pc_encoding);
      buffer->Store<int16_t>(location_ + 10u, ldr_encoding >> 16);
      buffer->Store<int16_t>(location_ + 12u, static_cast<int16_t>(ldr_encoding & 0xffff));
      break;
    }
  }
}
2016
Dave Allison65fcc2c2014-04-28 13:45:27 -07002017uint16_t Thumb2Assembler::EmitCompareAndBranch(Register rn, uint16_t prev, bool n) {
Nicolas Geoffrayd56376c2015-05-21 12:32:34 +00002018 CHECK(IsLowRegister(rn));
Dave Allison65fcc2c2014-04-28 13:45:27 -07002019 uint32_t location = buffer_.Size();
2020
2021 // This is always unresolved as it must be a forward branch.
2022 Emit16(prev); // Previous link.
Vladimir Markocf93a5c2015-06-16 11:33:24 +00002023 return AddFixup(Fixup::CompareAndBranch(location, rn, n ? NE : EQ));
Dave Allison65fcc2c2014-04-28 13:45:27 -07002024}
2025
2026
2027// NOTE: this only support immediate offsets, not [rx,ry].
2028// TODO: support [rx,ry] instructions.
// Emits a single load/store (word, byte or halfword, optionally signed).
// Selects a 16-bit encoding when the register choice, offset range, alignment
// and addressing mode permit; otherwise emits the 32-bit encoding.
// `cond` is only validated through CheckCondition(); it is not encoded here.
void Thumb2Assembler::EmitLoadStore(Condition cond,
                                    bool load,
                                    bool byte,
                                    bool half,
                                    bool is_signed,
                                    Register rd,
                                    const Address& ad) {
  CHECK_NE(rd, kNoRegister);
  CheckCondition(cond);
  bool must_be_32bit = force_32bit_;
  // 16-bit encodings only address low registers (SP/PC have special forms).
  if (IsHighRegister(rd)) {
    must_be_32bit = true;
  }

  Register rn = ad.GetRegister();
  if (IsHighRegister(rn) && rn != SP && rn != PC) {
    must_be_32bit = true;
  }

  // Signed loads, negative offsets and pre/post-indexed modes have no 16-bit form.
  if (is_signed || ad.GetOffset() < 0 || ad.GetMode() != Address::Offset) {
    must_be_32bit = true;
  }

  if (ad.IsImmediate()) {
    // Immediate offset
    int32_t offset = ad.GetOffset();

    // The 16 bit SP relative instruction can only have a 10 bit offset.
    if (rn == SP && offset >= (1 << 10)) {
      must_be_32bit = true;
    }

    if (byte) {
      // 5 bit offset, no shift.
      if (offset >= (1 << 5)) {
        must_be_32bit = true;
      }
    } else if (half) {
      // 6 bit offset, shifted by 1.
      if (offset >= (1 << 6)) {
        must_be_32bit = true;
      }
    } else {
      // 7 bit offset, shifted by 2.
      if (offset >= (1 << 7)) {
        must_be_32bit = true;
      }
    }

    if (must_be_32bit) {
      // 32-bit immediate-offset encoding; size bits: byte=0, half=B21, word=B22.
      int32_t encoding = B31 | B30 | B29 | B28 | B27 |
          (load ? B20 : 0) |
          (is_signed ? B24 : 0) |
          static_cast<uint32_t>(rd) << 12 |
          ad.encodingThumb(true) |
          (byte ? 0 : half ? B21 : B22);
      Emit32(encoding);
    } else {
      // 16 bit thumb1.
      uint8_t opA = 0;
      bool sp_relative = false;

      if (byte) {
        opA = 7U /* 0b0111 */;
      } else if (half) {
        opA = 8U /* 0b1000 */;
      } else {
        if (rn == SP) {
          opA = 9U /* 0b1001 */;
          sp_relative = true;
        } else {
          opA = 6U /* 0b0110 */;
        }
      }
      int16_t encoding = opA << 12 |
          (load ? B11 : 0);

      CHECK_GE(offset, 0);
      if (sp_relative) {
        // SP relative, 10 bit offset.
        CHECK_LT(offset, (1 << 10));
        CHECK_ALIGNED(offset, 4);
        encoding |= rd << 8 | offset >> 2;
      } else {
        // No SP relative. The offset is shifted right depending on
        // the size of the load/store.
        encoding |= static_cast<uint32_t>(rd);

        if (byte) {
          // 5 bit offset, no shift.
          CHECK_LT(offset, (1 << 5));
        } else if (half) {
          // 6 bit offset, shifted by 1.
          CHECK_LT(offset, (1 << 6));
          CHECK_ALIGNED(offset, 2);
          offset >>= 1;
        } else {
          // 7 bit offset, shifted by 2.
          CHECK_LT(offset, (1 << 7));
          CHECK_ALIGNED(offset, 4);
          offset >>= 2;
        }
        encoding |= rn << 3 | offset << 6;
      }

      Emit16(encoding);
    }
  } else {
    // Register shift.
    if (ad.GetRegister() == PC) {
      // PC relative literal encoding.
      int32_t offset = ad.GetOffset();
      if (must_be_32bit || offset < 0 || offset >= (1 << 10) || !load) {
        // 32-bit literal; negative offsets clear the U (add) bit.
        int32_t up = B23;
        if (offset < 0) {
          offset = -offset;
          up = 0;
        }
        CHECK_LT(offset, (1 << 12));
        int32_t encoding = 0x1f << 27 | 0xf << 16 | B22 | (load ? B20 : 0) |
            offset | up |
            static_cast<uint32_t>(rd) << 12;
        Emit32(encoding);
      } else {
        // 16 bit literal load.
        CHECK_GE(offset, 0);
        CHECK_LT(offset, (1 << 10));
        int32_t encoding = B14 | (load ? B11 : 0) | static_cast<uint32_t>(rd) << 8 | offset >> 2;
        Emit16(encoding);
      }
    } else {
      if (ad.GetShiftCount() != 0) {
        // If there is a shift count this must be 32 bit.
        must_be_32bit = true;
      } else if (IsHighRegister(ad.GetRegisterOffset())) {
        must_be_32bit = true;
      }

      if (must_be_32bit) {
        // 32-bit register-offset encoding; size bits: half=B21, word=B22.
        int32_t encoding = 0x1f << 27 | (load ? B20 : 0) | static_cast<uint32_t>(rd) << 12 |
            ad.encodingThumb(true);
        if (half) {
          encoding |= B21;
        } else if (!byte) {
          encoding |= B22;
        }
        Emit32(encoding);
      } else {
        // 16 bit register offset.
        int32_t encoding = B14 | B12 | (load ? B11 : 0) | static_cast<uint32_t>(rd) |
            ad.encodingThumb(false);
        if (byte) {
          encoding |= B10;
        } else if (half) {
          encoding |= B9;
        }
        Emit16(encoding);
      }
    }
  }
}
2190
2191
// Emits a multiple load/store (LDM/STM). Prefers the 16-bit PUSH/POP form for
// SP-writeback operations on low registers (plus LR for push / PC for pop),
// then the 16-bit LDM/STM, falling back to the 32-bit encoding. Only the
// IA/IA_W and DB/DB_W modes are encodable in Thumb; DA/IB variants abort.
void Thumb2Assembler::EmitMultiMemOp(Condition cond,
                                     BlockAddressMode bam,
                                     bool load,
                                     Register base,
                                     RegList regs) {
  CHECK_NE(base, kNoRegister);
  CheckCondition(cond);
  bool must_be_32bit = force_32bit_;

  // POP is LDMIA SP! and PUSH is STMDB SP!; PC is only allowed in POP's list
  // and LR only in PUSH's.
  if (!must_be_32bit && base == SP && bam == (load ? IA_W : DB_W) &&
      (regs & 0xff00 & ~(1 << (load ? PC : LR))) == 0) {
    // Use 16-bit PUSH/POP.
    int16_t encoding = B15 | B13 | B12 | (load ? B11 : 0) | B10 |
        ((regs & (1 << (load ? PC : LR))) != 0 ? B8 : 0) | (regs & 0x00ff);
    Emit16(encoding);
    return;
  }

  // High registers in the list force the 32-bit encoding.
  if ((regs & 0xff00) != 0) {
    must_be_32bit = true;
  }

  bool w_bit = bam == IA_W || bam == DB_W || bam == DA_W || bam == IB_W;
  // 16 bit always uses writeback.
  if (!w_bit) {
    must_be_32bit = true;
  }

  if (must_be_32bit) {
    uint32_t op = 0;
    switch (bam) {
      case IA:
      case IA_W:
        op = 1U /* 0b01 */;
        break;
      case DB:
      case DB_W:
        op = 2U /* 0b10 */;
        break;
      case DA:
      case IB:
      case DA_W:
      case IB_W:
        LOG(FATAL) << "LDM/STM mode not supported on thumb: " << bam;
        UNREACHABLE();
    }
    if (load) {
      // Cannot have SP in the list.
      CHECK_EQ((regs & (1 << SP)), 0);
    } else {
      // Cannot have PC or SP in the list.
      CHECK_EQ((regs & (1 << PC | 1 << SP)), 0);
    }
    int32_t encoding = B31 | B30 | B29 | B27 |
        (op << 23) |
        (load ? B20 : 0) |
        base << 16 |
        regs |
        (w_bit << 21);
    Emit32(encoding);
  } else {
    // 16-bit LDMIA/STMIA with writeback.
    int16_t encoding = B15 | B14 |
        (load ? B11 : 0) |
        base << 8 |
        regs;
    Emit16(encoding);
  }
}
2260
// Emits a branch (B, B<cond>, BL or BLX) to `label` and registers a Fixup for
// it. Bound labels are resolved immediately; unbound labels are linked into
// the label's chain, with the previous chain head stored in the instruction
// slot. The emitted placeholder bytes match the fixup's current size.
void Thumb2Assembler::EmitBranch(Condition cond, Label* label, bool link, bool x) {
  // Branches start as 16-bit only if they may later be relocated/expanded.
  bool use32bit = IsForced32Bit() || !CanRelocateBranches();
  uint32_t pc = buffer_.Size();
  Fixup::Type branch_type;
  if (cond == AL) {
    if (link) {
      // Branch-and-link has no 16-bit Fixup encoding here.
      use32bit = true;
      if (x) {
        branch_type = Fixup::kUnconditionalLinkX;      // BLX.
      } else {
        branch_type = Fixup::kUnconditionalLink;       // BX.
      }
    } else {
      branch_type = Fixup::kUnconditional;             // B.
    }
  } else {
    branch_type = Fixup::kConditional;                 // B<cond>.
  }

  Fixup::Size size = use32bit ? Fixup::kBranch32Bit : Fixup::kBranch16Bit;
  FixupId branch_id = AddFixup(Fixup::Branch(pc, branch_type, size, cond));

  if (label->IsBound()) {
    // The branch is to a bound label which means that it's a backwards branch.
    GetFixup(branch_id)->Resolve(label->Position());
    Emit16(0);
  } else {
    // Branch target is an unbound label. Add it to a singly-linked list maintained within
    // the code with the label serving as the head.
    Emit16(static_cast<uint16_t>(label->position_));
    label->LinkTo(branch_id);
  }

  if (use32bit) {
    // Second halfword of the 32-bit placeholder.
    Emit16(0);
  }
  DCHECK_EQ(buffer_.Size() - pc, GetFixup(branch_id)->GetSizeInBytes());
}
2299
2300
2301void Thumb2Assembler::clz(Register rd, Register rm, Condition cond) {
2302 CHECK_NE(rd, kNoRegister);
2303 CHECK_NE(rm, kNoRegister);
2304 CheckCondition(cond);
2305 CHECK_NE(rd, PC);
2306 CHECK_NE(rm, PC);
2307 int32_t encoding = B31 | B30 | B29 | B28 | B27 |
2308 B25 | B23 | B21 | B20 |
2309 static_cast<uint32_t>(rm) << 16 |
2310 0xf << 12 |
2311 static_cast<uint32_t>(rd) << 8 |
2312 B7 |
2313 static_cast<uint32_t>(rm);
2314 Emit32(encoding);
2315}
2316
2317
2318void Thumb2Assembler::movw(Register rd, uint16_t imm16, Condition cond) {
2319 CheckCondition(cond);
2320 bool must_be_32bit = force_32bit_;
2321 if (IsHighRegister(rd)|| imm16 >= 256u) {
2322 must_be_32bit = true;
2323 }
2324
2325 if (must_be_32bit) {
2326 // Use encoding T3.
Andreas Gampec8ccf682014-09-29 20:07:43 -07002327 uint32_t imm4 = (imm16 >> 12) & 15U /* 0b1111 */;
2328 uint32_t i = (imm16 >> 11) & 1U /* 0b1 */;
2329 uint32_t imm3 = (imm16 >> 8) & 7U /* 0b111 */;
Dave Allison65fcc2c2014-04-28 13:45:27 -07002330 uint32_t imm8 = imm16 & 0xff;
2331 int32_t encoding = B31 | B30 | B29 | B28 |
2332 B25 | B22 |
2333 static_cast<uint32_t>(rd) << 8 |
2334 i << 26 |
2335 imm4 << 16 |
2336 imm3 << 12 |
2337 imm8;
2338 Emit32(encoding);
2339 } else {
2340 int16_t encoding = B13 | static_cast<uint16_t>(rd) << 8 |
2341 imm16;
2342 Emit16(encoding);
2343 }
2344}
2345
2346
2347void Thumb2Assembler::movt(Register rd, uint16_t imm16, Condition cond) {
2348 CheckCondition(cond);
2349 // Always 32 bits.
Andreas Gampec8ccf682014-09-29 20:07:43 -07002350 uint32_t imm4 = (imm16 >> 12) & 15U /* 0b1111 */;
2351 uint32_t i = (imm16 >> 11) & 1U /* 0b1 */;
2352 uint32_t imm3 = (imm16 >> 8) & 7U /* 0b111 */;
Dave Allison65fcc2c2014-04-28 13:45:27 -07002353 uint32_t imm8 = imm16 & 0xff;
2354 int32_t encoding = B31 | B30 | B29 | B28 |
2355 B25 | B23 | B22 |
2356 static_cast<uint32_t>(rd) << 8 |
2357 i << 26 |
2358 imm4 << 16 |
2359 imm3 << 12 |
2360 imm8;
2361 Emit32(encoding);
2362}
2363
2364
2365void Thumb2Assembler::ldrex(Register rt, Register rn, uint16_t imm, Condition cond) {
2366 CHECK_NE(rn, kNoRegister);
2367 CHECK_NE(rt, kNoRegister);
2368 CheckCondition(cond);
Dave Allison65fcc2c2014-04-28 13:45:27 -07002369 CHECK_LT(imm, (1u << 10));
2370
2371 int32_t encoding = B31 | B30 | B29 | B27 | B22 | B20 |
2372 static_cast<uint32_t>(rn) << 16 |
2373 static_cast<uint32_t>(rt) << 12 |
2374 0xf << 8 |
2375 imm >> 2;
2376 Emit32(encoding);
2377}
2378
2379
// Convenience overload: LDREX rt, [rn] with a zero offset.
void Thumb2Assembler::ldrex(Register rt, Register rn, Condition cond) {
  ldrex(rt, rn, 0, cond);
}
2383
2384
2385void Thumb2Assembler::strex(Register rd,
2386 Register rt,
2387 Register rn,
2388 uint16_t imm,
2389 Condition cond) {
2390 CHECK_NE(rn, kNoRegister);
2391 CHECK_NE(rd, kNoRegister);
2392 CHECK_NE(rt, kNoRegister);
2393 CheckCondition(cond);
2394 CHECK_LT(imm, (1u << 10));
2395
2396 int32_t encoding = B31 | B30 | B29 | B27 | B22 |
2397 static_cast<uint32_t>(rn) << 16 |
2398 static_cast<uint32_t>(rt) << 12 |
2399 static_cast<uint32_t>(rd) << 8 |
2400 imm >> 2;
2401 Emit32(encoding);
2402}
2403
2404
Calin Juravle52c48962014-12-16 17:02:57 +00002405void Thumb2Assembler::ldrexd(Register rt, Register rt2, Register rn, Condition cond) {
2406 CHECK_NE(rn, kNoRegister);
2407 CHECK_NE(rt, kNoRegister);
2408 CHECK_NE(rt2, kNoRegister);
2409 CHECK_NE(rt, rt2);
2410 CheckCondition(cond);
2411
2412 int32_t encoding = B31 | B30 | B29 | B27 | B23 | B22 | B20 |
2413 static_cast<uint32_t>(rn) << 16 |
2414 static_cast<uint32_t>(rt) << 12 |
2415 static_cast<uint32_t>(rt2) << 8 |
2416 B6 | B5 | B4 | B3 | B2 | B1 | B0;
2417 Emit32(encoding);
2418}
2419
2420
// Convenience overload: STREX rd, rt, [rn] with a zero offset.
void Thumb2Assembler::strex(Register rd,
                            Register rt,
                            Register rn,
                            Condition cond) {
  strex(rd, rt, rn, 0, cond);
}
2427
2428
Calin Juravle52c48962014-12-16 17:02:57 +00002429void Thumb2Assembler::strexd(Register rd, Register rt, Register rt2, Register rn, Condition cond) {
2430 CHECK_NE(rd, kNoRegister);
2431 CHECK_NE(rn, kNoRegister);
2432 CHECK_NE(rt, kNoRegister);
2433 CHECK_NE(rt2, kNoRegister);
2434 CHECK_NE(rt, rt2);
2435 CHECK_NE(rd, rt);
2436 CHECK_NE(rd, rt2);
2437 CheckCondition(cond);
2438
2439 int32_t encoding = B31 | B30 | B29 | B27 | B23 | B22 |
2440 static_cast<uint32_t>(rn) << 16 |
2441 static_cast<uint32_t>(rt) << 12 |
2442 static_cast<uint32_t>(rt2) << 8 |
2443 B6 | B5 | B4 |
2444 static_cast<uint32_t>(rd);
2445 Emit32(encoding);
2446}
2447
2448
Dave Allison65fcc2c2014-04-28 13:45:27 -07002449void Thumb2Assembler::clrex(Condition cond) {
2450 CheckCondition(cond);
2451 int32_t encoding = B31 | B30 | B29 | B27 | B28 | B25 | B24 | B23 |
2452 B21 | B20 |
2453 0xf << 16 |
2454 B15 |
2455 0xf << 8 |
2456 B5 |
2457 0xf;
2458 Emit32(encoding);
2459}
2460
2461
2462void Thumb2Assembler::nop(Condition cond) {
2463 CheckCondition(cond);
Andreas Gampec8ccf682014-09-29 20:07:43 -07002464 uint16_t encoding = B15 | B13 | B12 |
Dave Allison65fcc2c2014-04-28 13:45:27 -07002465 B11 | B10 | B9 | B8;
Andreas Gampec8ccf682014-09-29 20:07:43 -07002466 Emit16(static_cast<int16_t>(encoding));
Dave Allison65fcc2c2014-04-28 13:45:27 -07002467}
2468
2469
// VMOV core register to single-precision register: Sn = Rt.
void Thumb2Assembler::vmovsr(SRegister sn, Register rt, Condition cond) {
  CHECK_NE(sn, kNoSRegister);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CheckCondition(cond);
  // The S register number is split into a 4-bit Vn field and the low N bit.
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 |
                     ((static_cast<int32_t>(sn) >> 1)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sn) & 1)*B7) | B4;
  Emit32(encoding);
}
2483
2484
// VMOV single-precision register to core register: Rt = Sn.
// Same encoding as vmovsr plus B20 (the to-core direction bit).
void Thumb2Assembler::vmovrs(Register rt, SRegister sn, Condition cond) {
  CHECK_NE(sn, kNoSRegister);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CheckCondition(cond);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B20 |
                     ((static_cast<int32_t>(sn) >> 1)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sn) & 1)*B7) | B4;
  Emit32(encoding);
}
2498
2499
// VMOV two core registers to two consecutive single-precision registers:
// sm = rt, sm+1 = rt2. sm must not be S31 since sm+1 must exist.
void Thumb2Assembler::vmovsrr(SRegister sm, Register rt, Register rt2,
                              Condition cond) {
  CHECK_NE(sm, kNoSRegister);
  CHECK_NE(sm, S31);  // The pair sm, sm+1 must fit in S0-S31.
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CHECK_NE(rt2, kNoRegister);
  CHECK_NE(rt2, SP);
  CHECK_NE(rt2, PC);
  CheckCondition(cond);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sm) & 1)*B5) | B4 |
                     (static_cast<int32_t>(sm) >> 1);
  Emit32(encoding);
}
2519
2520
// VMOV two consecutive single-precision registers to two core registers:
// rt = sm, rt2 = sm+1. Same encoding as vmovsrr plus B20 (direction bit).
void Thumb2Assembler::vmovrrs(Register rt, Register rt2, SRegister sm,
                              Condition cond) {
  CHECK_NE(sm, kNoSRegister);
  CHECK_NE(sm, S31);  // The pair sm, sm+1 must fit in S0-S31.
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CHECK_NE(rt2, kNoRegister);
  CHECK_NE(rt2, SP);
  CHECK_NE(rt2, PC);
  CHECK_NE(rt, rt2);  // Writing the same core register twice is unpredictable.
  CheckCondition(cond);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 | B20 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sm) & 1)*B5) | B4 |
                     (static_cast<int32_t>(sm) >> 1);
  Emit32(encoding);
}
2541
2542
// VMOV two core registers to a double-precision register: dm = rt2:rt.
void Thumb2Assembler::vmovdrr(DRegister dm, Register rt, Register rt2,
                              Condition cond) {
  CHECK_NE(dm, kNoDRegister);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CHECK_NE(rt2, kNoRegister);
  CHECK_NE(rt2, SP);
  CHECK_NE(rt2, PC);
  CheckCondition(cond);
  // The D register number is split into a 4-bit Vm field and the high M bit.
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
                     ((static_cast<int32_t>(dm) >> 4)*B5) | B4 |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit32(encoding);
}
2561
2562
// VMOV a double-precision register to two core registers: rt2:rt = dm.
// Same encoding as vmovdrr plus B20 (the to-core direction bit).
void Thumb2Assembler::vmovrrd(Register rt, Register rt2, DRegister dm,
                              Condition cond) {
  CHECK_NE(dm, kNoDRegister);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CHECK_NE(rt2, kNoRegister);
  CHECK_NE(rt2, SP);
  CHECK_NE(rt2, PC);
  CHECK_NE(rt, rt2);  // Writing the same core register twice is unpredictable.
  CheckCondition(cond);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 | B20 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
                     ((static_cast<int32_t>(dm) >> 4)*B5) | B4 |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit32(encoding);
}
2582
2583
// VLDR.32: load single-precision register sd from the given address.
// Note: unlike vstrs, no PC base-register check — PC-relative (literal)
// loads are legal here.
void Thumb2Assembler::vldrs(SRegister sd, const Address& ad, Condition cond) {
  const Address& addr = static_cast<const Address&>(ad);  // Identity cast.
  CHECK_NE(sd, kNoSRegister);
  CheckCondition(cond);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 | B20 |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     B11 | B9 | addr.vencoding();
  Emit32(encoding);
}
2595
2596
// VSTR.32: store single-precision register sd to the given address.
void Thumb2Assembler::vstrs(SRegister sd, const Address& ad, Condition cond) {
  const Address& addr = static_cast<const Address&>(ad);  // Identity cast.
  // Storing relative to PC is not allowed.
  CHECK_NE(static_cast<Register>(addr.encodingArm() & (0xf << kRnShift)), PC);
  CHECK_NE(sd, kNoSRegister);
  CheckCondition(cond);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     B11 | B9 | addr.vencoding();
  Emit32(encoding);
}
2609
2610
// VLDR.64: load double-precision register dd from the given address.
// PC-relative (literal) loads are legal, hence no PC check.
void Thumb2Assembler::vldrd(DRegister dd, const Address& ad, Condition cond) {
  const Address& addr = static_cast<const Address&>(ad);  // Identity cast.
  CHECK_NE(dd, kNoDRegister);
  CheckCondition(cond);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 | B20 |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     B11 | B9 | B8 | addr.vencoding();
  Emit32(encoding);
}
2622
2623
// VSTR.64: store double-precision register dd to the given address.
void Thumb2Assembler::vstrd(DRegister dd, const Address& ad, Condition cond) {
  const Address& addr = static_cast<const Address&>(ad);  // Identity cast.
  // Storing relative to PC is not allowed.
  CHECK_NE(static_cast<Register>(addr.encodingArm() & (0xf << kRnShift)), PC);
  CHECK_NE(dd, kNoDRegister);
  CheckCondition(cond);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     B11 | B9 | B8 | addr.vencoding();
  Emit32(encoding);
}
2636
2637
2638void Thumb2Assembler::vpushs(SRegister reg, int nregs, Condition cond) {
2639 EmitVPushPop(static_cast<uint32_t>(reg), nregs, true, false, cond);
2640}
2641
2642
2643void Thumb2Assembler::vpushd(DRegister reg, int nregs, Condition cond) {
2644 EmitVPushPop(static_cast<uint32_t>(reg), nregs, true, true, cond);
2645}
2646
2647
2648void Thumb2Assembler::vpops(SRegister reg, int nregs, Condition cond) {
2649 EmitVPushPop(static_cast<uint32_t>(reg), nregs, false, false, cond);
2650}
2651
2652
2653void Thumb2Assembler::vpopd(DRegister reg, int nregs, Condition cond) {
2654 EmitVPushPop(static_cast<uint32_t>(reg), nregs, false, true, cond);
2655}
2656
2657
// Common encoding helper for VPUSH/VPOP: `reg` is the raw number of the first
// S or D register, `nregs` the count, `push` selects VPUSH (SP-decrementing
// store-multiple) vs VPOP, and `dbl` selects double vs single precision.
void Thumb2Assembler::EmitVPushPop(uint32_t reg, int nregs, bool push, bool dbl, Condition cond) {
  CheckCondition(cond);

  uint32_t D;
  uint32_t Vd;
  if (dbl) {
    // Encoded as D:Vd.
    D = (reg >> 4) & 1;
    Vd = reg & 15U /* 0b1111 */;
  } else {
    // Encoded as Vd:D.
    D = reg & 1;
    Vd = (reg >> 1) & 15U /* 0b1111 */;
  }
  // VPUSH uses pre-decrement addressing (B24); VPOP uses increment-after
  // with load (B23 | B20). Base register is SP (0b1101 in bits 19-16).
  int32_t encoding = B27 | B26 | B21 | B19 | B18 | B16 |
                    B11 | B9 |
        (dbl ? B8 : 0) |
        (push ? B24 : (B23 | B20)) |
        14U /* 0b1110 */ << 28 |  // Condition field: always AL in Thumb2.
        nregs << (dbl ? 1 : 0) |  // imm8 counts words: 2 per D register.
        D << 22 |
        Vd << 12;
  Emit32(encoding);
}
2682
2683
// Emit a three-operand single-precision VFP data-processing instruction
// (sd = sn <op> sm); `opcode` carries the operation-specific bits.
void Thumb2Assembler::EmitVFPsss(Condition cond, int32_t opcode,
                                 SRegister sd, SRegister sn, SRegister sm) {
  CHECK_NE(sd, kNoSRegister);
  CHECK_NE(sn, kNoSRegister);
  CHECK_NE(sm, kNoSRegister);
  CheckCondition(cond);
  // Each S register number splits into a 4-bit field plus one extra bit.
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | opcode |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sn) >> 1)*B16) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     ((static_cast<int32_t>(sn) & 1)*B7) |
                     ((static_cast<int32_t>(sm) & 1)*B5) |
                     (static_cast<int32_t>(sm) >> 1);
  Emit32(encoding);
}
2700
2701
// Emit a three-operand double-precision VFP data-processing instruction
// (dd = dn <op> dm); `opcode` carries the operation-specific bits.
void Thumb2Assembler::EmitVFPddd(Condition cond, int32_t opcode,
                                 DRegister dd, DRegister dn, DRegister dm) {
  CHECK_NE(dd, kNoDRegister);
  CHECK_NE(dn, kNoDRegister);
  CHECK_NE(dm, kNoDRegister);
  CheckCondition(cond);
  // Each D register number splits into a 4-bit field plus one high bit.
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | B8 | opcode |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dn) & 0xf)*B16) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     ((static_cast<int32_t>(dn) >> 4)*B7) |
                     ((static_cast<int32_t>(dm) >> 4)*B5) |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit32(encoding);
}
2718
2719
// Emit a VFP instruction with single-precision destination and
// double-precision source (e.g. conversions D -> S).
void Thumb2Assembler::EmitVFPsd(Condition cond, int32_t opcode,
                                SRegister sd, DRegister dm) {
  CHECK_NE(sd, kNoSRegister);
  CHECK_NE(dm, kNoDRegister);
  CheckCondition(cond);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | opcode |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     ((static_cast<int32_t>(dm) >> 4)*B5) |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit32(encoding);
}
2733
2734
// Emit a VFP instruction with double-precision destination and
// single-precision source (e.g. conversions S -> D).
void Thumb2Assembler::EmitVFPds(Condition cond, int32_t opcode,
                                DRegister dd, SRegister sm) {
  CHECK_NE(dd, kNoDRegister);
  CHECK_NE(sm, kNoSRegister);
  CheckCondition(cond);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | opcode |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     ((static_cast<int32_t>(sm) & 1)*B5) |
                     (static_cast<int32_t>(sm) >> 1);
  Emit32(encoding);
}
2748
2749
// VMRS APSR_nzcv, FPSCR: copy the FPSCR condition flags into the APSR
// (Rt = 0b1111, i.e. PC, selects the APSR_nzcv form).
void Thumb2Assembler::vmstat(Condition cond) {  // VMRS APSR_nzcv, FPSCR.
  CHECK_NE(cond, kNoCondition);
  CheckCondition(cond);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
      B27 | B26 | B25 | B23 | B22 | B21 | B20 | B16 |
      (static_cast<int32_t>(PC)*B12) |
      B11 | B9 | B4;
  Emit32(encoding);
}
2759
2760
// SVC (16-bit encoding T1, 0xdf00 | imm8): supervisor call.
void Thumb2Assembler::svc(uint32_t imm8) {
  CHECK(IsUint<8>(imm8)) << imm8;  // Only an 8-bit immediate is encodable.
  int16_t encoding = B15 | B14 | B12 |
                     B11 | B10 | B9 | B8 |
                     imm8;
  Emit16(encoding);
}
2768
2769
// BKPT (16-bit encoding T1, 0xbe00 | imm8): software breakpoint.
void Thumb2Assembler::bkpt(uint16_t imm8) {
  CHECK(IsUint<8>(imm8)) << imm8;  // Only an 8-bit immediate is encodable.
  int16_t encoding = B15 | B13 | B12 |
                     B11 | B10 | B9 |
                     imm8;
  Emit16(encoding);
}
2777
2778// Convert the given IT state to a mask bit given bit 0 of the first
2779// condition and a shift position.
2780static uint8_t ToItMask(ItState s, uint8_t firstcond0, uint8_t shift) {
2781 switch (s) {
2782 case kItOmitted: return 1 << shift;
2783 case kItThen: return firstcond0 << shift;
2784 case kItElse: return !firstcond0 << shift;
2785 }
2786 return 0;
2787}
2788
2789
2790// Set the IT condition in the given position for the given state. This is used
2791// to check that conditional instructions match the preceding IT statement.
2792void Thumb2Assembler::SetItCondition(ItState s, Condition cond, uint8_t index) {
2793 switch (s) {
2794 case kItOmitted: it_conditions_[index] = AL; break;
2795 case kItThen: it_conditions_[index] = cond; break;
2796 case kItElse:
2797 it_conditions_[index] = static_cast<Condition>(static_cast<uint8_t>(cond) ^ 1);
2798 break;
2799 }
2800}
2801
2802
// IT (If-Then) instruction: makes up to four following instructions
// conditional. Builds the 4-bit mask from the three optional slot states and
// records the per-slot conditions so that subsequent CheckCondition() calls
// can validate the instructions inside the block.
void Thumb2Assembler::it(Condition firstcond, ItState i1, ItState i2, ItState i3) {
  CheckCondition(AL);       // Not allowed in IT block.
  uint8_t firstcond0 = static_cast<uint8_t>(firstcond) & 1;

  // All conditions to AL.
  for (uint8_t i = 0; i < 4; ++i) {
    it_conditions_[i] = AL;
  }

  // Slot 0 always uses the first condition; slots 1-3 derive from i1..i3.
  SetItCondition(kItThen, firstcond, 0);
  uint8_t mask = ToItMask(i1, firstcond0, 3);
  SetItCondition(i1, firstcond, 1);

  // A kItOmitted slot terminates the block; later states are ignored.
  if (i1 != kItOmitted) {
    mask |= ToItMask(i2, firstcond0, 2);
    SetItCondition(i2, firstcond, 2);
    if (i2 != kItOmitted) {
      mask |= ToItMask(i3, firstcond0, 1);
      SetItCondition(i3, firstcond, 3);
      if (i3 != kItOmitted) {
        // Four-instruction block: the terminating bit is the LSB.
        mask |= 1U /* 0b0001 */;
      }
    }
  }

  // Start at first condition.
  it_cond_index_ = 0;
  next_condition_ = it_conditions_[0];
  uint16_t encoding = B15 | B13 | B12 |
                      B11 | B10 | B9 | B8 |
                      firstcond << 4 |
                      mask;
  Emit16(encoding);
}
2837
2838
// CBZ: compare rn against zero and branch if equal. Only forward branches to
// unbound labels and low registers (R0-R7) are encodable.
void Thumb2Assembler::cbz(Register rn, Label* label) {
  CheckCondition(AL);
  if (label->IsBound()) {
    LOG(FATAL) << "cbz can only be used to branch forwards";
    UNREACHABLE();
  } else if (IsHighRegister(rn)) {
    LOG(FATAL) << "cbz can only be used with low registers";
    UNREACHABLE();
  } else {
    // Link this branch into the label's fixup chain; resolved at bind time.
    uint16_t branchid = EmitCompareAndBranch(rn, static_cast<uint16_t>(label->position_), false);
    label->LinkTo(branchid);
  }
}
2852
2853
// CBNZ: compare rn against zero and branch if not equal. Only forward branches
// to unbound labels and low registers (R0-R7) are encodable.
void Thumb2Assembler::cbnz(Register rn, Label* label) {
  CheckCondition(AL);
  if (label->IsBound()) {
    LOG(FATAL) << "cbnz can only be used to branch forwards";
    UNREACHABLE();
  } else if (IsHighRegister(rn)) {
    LOG(FATAL) << "cbnz can only be used with low registers";
    UNREACHABLE();
  } else {
    // Link this branch into the label's fixup chain; resolved at bind time.
    uint16_t branchid = EmitCompareAndBranch(rn, static_cast<uint16_t>(label->position_), true);
    label->LinkTo(branchid);
  }
}
2867
2868
// BLX (register), 16-bit encoding T1 (0x4780 | Rm << 3): branch with link
// and optional instruction-set exchange to the address in rm.
void Thumb2Assembler::blx(Register rm, Condition cond) {
  CHECK_NE(rm, kNoRegister);
  CheckCondition(cond);
  int16_t encoding = B14 | B10 | B9 | B8 | B7 | static_cast<int16_t>(rm) << 3;
  Emit16(encoding);
}
2875
2876
// BX (register), 16-bit encoding T1 (0x4700 | Rm << 3): branch and optional
// instruction-set exchange to the address in rm.
void Thumb2Assembler::bx(Register rm, Condition cond) {
  CHECK_NE(rm, kNoRegister);
  CheckCondition(cond);
  int16_t encoding = B14 | B10 | B9 | B8 | static_cast<int16_t>(rm) << 3;
  Emit16(encoding);
}
2883
2884
2885void Thumb2Assembler::Push(Register rd, Condition cond) {
2886 str(rd, Address(SP, -kRegisterSize, Address::PreIndex), cond);
2887}
2888
2889
2890void Thumb2Assembler::Pop(Register rd, Condition cond) {
2891 ldr(rd, Address(SP, kRegisterSize, Address::PostIndex), cond);
2892}
2893
2894
// Push a register list: store-multiple, decrement-before, write back SP.
void Thumb2Assembler::PushList(RegList regs, Condition cond) {
  stm(DB_W, SP, regs, cond);
}
2898
2899
// Pop a register list: load-multiple, increment-after, write back SP.
void Thumb2Assembler::PopList(RegList regs, Condition cond) {
  ldm(IA_W, SP, regs, cond);
}
2903
2904
2905void Thumb2Assembler::Mov(Register rd, Register rm, Condition cond) {
2906 if (cond != AL || rd != rm) {
2907 mov(rd, ShifterOperand(rm), cond);
2908 }
2909}
2910
2911
// Bind `label` to the current end of the instruction stream, resolving all
// branches previously linked to it.
void Thumb2Assembler::Bind(Label* label) {
  BindLabel(label, buffer_.Size());
}
2915
2916
// Logical shift left by immediate; `setcc` selects the flag-setting form.
void Thumb2Assembler::Lsl(Register rd, Register rm, uint32_t shift_imm,
                          bool setcc, Condition cond) {
  CHECK_LE(shift_imm, 31u);  // LSL immediate range is 0-31.
  CheckCondition(cond);
  EmitShift(rd, rm, LSL, shift_imm, setcc);
}
2923
2924
// Logical shift right by immediate (1-32); 32 is encoded as 0 per UAL.
void Thumb2Assembler::Lsr(Register rd, Register rm, uint32_t shift_imm,
                          bool setcc, Condition cond) {
  CHECK(1u <= shift_imm && shift_imm <= 32u);
  if (shift_imm == 32) shift_imm = 0;  // Comply to UAL syntax.
  CheckCondition(cond);
  EmitShift(rd, rm, LSR, shift_imm, setcc);
}
2932
2933
// Arithmetic shift right by immediate (1-32); 32 is encoded as 0 per UAL.
void Thumb2Assembler::Asr(Register rd, Register rm, uint32_t shift_imm,
                          bool setcc, Condition cond) {
  CHECK(1u <= shift_imm && shift_imm <= 32u);
  if (shift_imm == 32) shift_imm = 0;  // Comply to UAL syntax.
  CheckCondition(cond);
  EmitShift(rd, rm, ASR, shift_imm, setcc);
}
2941
2942
// Rotate right by immediate (1-31; a rotation of 0 or 32 is not encodable).
void Thumb2Assembler::Ror(Register rd, Register rm, uint32_t shift_imm,
                          bool setcc, Condition cond) {
  CHECK(1u <= shift_imm && shift_imm <= 31u);
  CheckCondition(cond);
  EmitShift(rd, rm, ROR, shift_imm, setcc);
}
2949
2950
Dave Allison45fdb932014-06-25 12:37:10 -07002951void Thumb2Assembler::Rrx(Register rd, Register rm, bool setcc, Condition cond) {
2952 CheckCondition(cond);
2953 EmitShift(rd, rm, RRX, rm, setcc);
2954}
2955
2956
// Logical shift left by the amount in register rn.
void Thumb2Assembler::Lsl(Register rd, Register rm, Register rn,
                          bool setcc, Condition cond) {
  CheckCondition(cond);
  EmitShift(rd, rm, LSL, rn, setcc);
}
2962
2963
// Logical shift right by the amount in register rn.
void Thumb2Assembler::Lsr(Register rd, Register rm, Register rn,
                          bool setcc, Condition cond) {
  CheckCondition(cond);
  EmitShift(rd, rm, LSR, rn, setcc);
}
2969
2970
// Arithmetic shift right by the amount in register rn.
void Thumb2Assembler::Asr(Register rd, Register rm, Register rn,
                          bool setcc, Condition cond) {
  CheckCondition(cond);
  EmitShift(rd, rm, ASR, rn, setcc);
}
2976
2977
// Rotate right by the amount in register rn.
void Thumb2Assembler::Ror(Register rd, Register rm, Register rn,
                          bool setcc, Condition cond) {
  CheckCondition(cond);
  EmitShift(rd, rm, ROR, rn, setcc);
}
2983
2984
// Patch the 32-bit branch instruction `inst` with the byte `offset` and
// return the new encoding. Bit 12 of `inst` distinguishes the two forms:
// set => unconditional branch (T4, 25-bit offset split into S:I1:I2:imm10:imm11),
// clear => conditional branch (T3, 21-bit offset split into S:J1:J2:imm6:imm11).
int32_t Thumb2Assembler::EncodeBranchOffset(int32_t offset, int32_t inst) {
  // The offset is off by 4 due to the way the ARM CPUs read PC.
  offset -= 4;
  offset >>= 1;  // Instructions are halfword-aligned; bit 0 is implicit.

  uint32_t value = 0;
  // There are two different encodings depending on the value of bit 12. In one case
  // intermediate values are calculated using the sign bit.
  if ((inst & B12) == B12) {
    // 25 bits of offset.
    uint32_t signbit = (offset >> 31) & 0x1;
    uint32_t i1 = (offset >> 22) & 0x1;
    uint32_t i2 = (offset >> 21) & 0x1;
    uint32_t imm10 = (offset >> 11) & 0x03ff;
    uint32_t imm11 = offset & 0x07ff;
    // J1/J2 are the I1/I2 bits XNOR'd with the sign bit.
    uint32_t j1 = (i1 ^ signbit) ? 0 : 1;
    uint32_t j2 = (i2 ^ signbit) ? 0 : 1;
    value = (signbit << 26) | (j1 << 13) | (j2 << 11) | (imm10 << 16) |
                      imm11;
    // Remove the offset from the current encoding.
    inst &= ~(0x3ff << 16 | 0x7ff);
  } else {
    uint32_t signbit = (offset >> 31) & 0x1;
    uint32_t imm6 = (offset >> 11) & 0x03f;
    uint32_t imm11 = offset & 0x07ff;
    uint32_t j1 = (offset >> 19) & 1;
    uint32_t j2 = (offset >> 17) & 1;
    value = (signbit << 26) | (j1 << 13) | (j2 << 11) | (imm6 << 16) |
                      imm11;
    // Remove the offset from the current encoding.
    inst &= ~(0x3f << 16 | 0x7ff);
  }
  // Mask out offset bits in current instruction.
  inst &= ~(B26 | B13 | B11);
  inst |= value;
  return inst;
}
3022
3023
// Inverse of EncodeBranchOffset: extract the byte offset from a 32-bit
// branch instruction. Bit 12 selects the unconditional (T4) vs conditional
// (T3) field layout.
int Thumb2Assembler::DecodeBranchOffset(int32_t instr) {
  int32_t imm32;
  if ((instr & B12) == B12) {
    uint32_t S = (instr >> 26) & 1;
    uint32_t J2 = (instr >> 11) & 1;
    uint32_t J1 = (instr >> 13) & 1;
    uint32_t imm10 = (instr >> 16) & 0x3FF;
    uint32_t imm11 = instr & 0x7FF;

    // Undo the J1/J2 XNOR-with-sign transformation of the T4 encoding.
    uint32_t I1 = ~(J1 ^ S) & 1;
    uint32_t I2 = ~(J2 ^ S) & 1;
    imm32 = (S << 24) | (I1 << 23) | (I2 << 22) | (imm10 << 12) | (imm11 << 1);
    imm32 = (imm32 << 8) >> 8;  // sign extend 24 bit immediate.
  } else {
    uint32_t S = (instr >> 26) & 1;
    uint32_t J2 = (instr >> 11) & 1;
    uint32_t J1 = (instr >> 13) & 1;
    uint32_t imm6 = (instr >> 16) & 0x3F;
    uint32_t imm11 = instr & 0x7FF;

    imm32 = (S << 20) | (J2 << 19) | (J1 << 18) | (imm6 << 12) | (imm11 << 1);
    imm32 = (imm32 << 11) >> 11;  // sign extend 21 bit immediate.
  }
  imm32 += 4;  // Re-apply the PC-read bias removed by EncodeBranchOffset.
  return imm32;
}
3050
// Translate a pre-fixup-expansion buffer position into the corresponding
// position after fixups have grown. Maintains incremental state
// (last_old_position_, last_fixup_id_, last_position_adjustment_) so repeated
// calls with non-decreasing positions are amortized O(1).
uint32_t Thumb2Assembler::GetAdjustedPosition(uint32_t old_position) {
  // We can reconstruct the adjustment by going through all the fixups from the beginning
  // up to the old_position. Since we expect AdjustedPosition() to be called in a loop
  // with increasing old_position, we can use the data from last AdjustedPosition() to
  // continue where we left off and the whole loop should be O(m+n) where m is the number
  // of positions to adjust and n is the number of fixups.
  if (old_position < last_old_position_) {
    // Position moved backwards; restart the scan from the beginning.
    last_position_adjustment_ = 0u;
    last_old_position_ = 0u;
    last_fixup_id_ = 0u;
  }
  while (last_fixup_id_ != fixups_.size()) {
    Fixup* fixup = GetFixup(last_fixup_id_);
    if (fixup->GetLocation() >= old_position + last_position_adjustment_) {
      break;
    }
    // Accumulate the growth of every fixup that changed size.
    if (fixup->GetSize() != fixup->GetOriginalSize()) {
      last_position_adjustment_ += fixup->GetSizeInBytes() - fixup->GetOriginalSizeInBytes();
    }
    ++last_fixup_id_;
  }
  last_old_position_ = old_position;
  return old_position + last_position_adjustment_;
}
3075
3076Literal* Thumb2Assembler::NewLiteral(size_t size, const uint8_t* data) {
3077 DCHECK(size == 4u || size == 8u) << size;
3078 literals_.emplace_back(size, data);
3079 return &literals_.back();
3080}
3081
// Emit a placeholder LDR of a 32-bit literal into core register rt; the
// actual PC-relative load is finalized when fixups are resolved.
void Thumb2Assembler::LoadLiteral(Register rt, Literal* literal) {
  DCHECK_EQ(literal->GetSize(), 4u);
  DCHECK(!literal->GetLabel()->IsBound());
  // High registers and forced-32-bit mode need the 32-bit LDR encoding
  // (which also has the larger 4KiB literal range).
  bool use32bit = IsForced32Bit() || IsHighRegister(rt);
  uint32_t location = buffer_.Size();
  Fixup::Size size = use32bit ? Fixup::kLiteral4KiB : Fixup::kLiteral1KiB;
  FixupId fixup_id = AddFixup(Fixup::LoadNarrowLiteral(location, rt, size));
  // The emitted halfword temporarily stores the label link, not a real
  // instruction; it is rewritten when the fixup is resolved.
  Emit16(static_cast<uint16_t>(literal->GetLabel()->position_));
  literal->GetLabel()->LinkTo(fixup_id);
  if (use32bit) {
    Emit16(0);
  }
  DCHECK_EQ(location + GetFixup(fixup_id)->GetSizeInBytes(), buffer_.Size());
}
3096
// Emit a placeholder load of a 64-bit literal into the core register pair
// rt (low), rt2 (high); finalized when fixups are resolved.
void Thumb2Assembler::LoadLiteral(Register rt, Register rt2, Literal* literal) {
  DCHECK_EQ(literal->GetSize(), 8u);
  DCHECK(!literal->GetLabel()->IsBound());
  uint32_t location = buffer_.Size();
  FixupId fixup_id =
      AddFixup(Fixup::LoadWideLiteral(location, rt, rt2, Fixup::kLongOrFPLiteral1KiB));
  // The emitted halfwords temporarily store the label link; rewritten later.
  Emit16(static_cast<uint16_t>(literal->GetLabel()->position_));
  literal->GetLabel()->LinkTo(fixup_id);
  Emit16(0);
  DCHECK_EQ(location + GetFixup(fixup_id)->GetSizeInBytes(), buffer_.Size());
}
3108
// Emit a placeholder VLDR of a 32-bit literal into single-precision sd;
// finalized when fixups are resolved.
void Thumb2Assembler::LoadLiteral(SRegister sd, Literal* literal) {
  DCHECK_EQ(literal->GetSize(), 4u);
  DCHECK(!literal->GetLabel()->IsBound());
  uint32_t location = buffer_.Size();
  FixupId fixup_id = AddFixup(Fixup::LoadSingleLiteral(location, sd, Fixup::kLongOrFPLiteral1KiB));
  // The emitted halfwords temporarily store the label link; rewritten later.
  Emit16(static_cast<uint16_t>(literal->GetLabel()->position_));
  literal->GetLabel()->LinkTo(fixup_id);
  Emit16(0);
  DCHECK_EQ(location + GetFixup(fixup_id)->GetSizeInBytes(), buffer_.Size());
}
3119
// Emit a placeholder VLDR of a 64-bit literal into double-precision dd;
// finalized when fixups are resolved.
void Thumb2Assembler::LoadLiteral(DRegister dd, Literal* literal) {
  DCHECK_EQ(literal->GetSize(), 8u);
  DCHECK(!literal->GetLabel()->IsBound());
  uint32_t location = buffer_.Size();
  FixupId fixup_id = AddFixup(Fixup::LoadDoubleLiteral(location, dd, Fixup::kLongOrFPLiteral1KiB));
  // The emitted halfwords temporarily store the label link; rewritten later.
  Emit16(static_cast<uint16_t>(literal->GetLabel()->position_));
  literal->GetLabel()->LinkTo(fixup_id);
  Emit16(0);
  DCHECK_EQ(location + GetFixup(fixup_id)->GetSizeInBytes(), buffer_.Size());
}
Dave Allison65fcc2c2014-04-28 13:45:27 -07003130
3131void Thumb2Assembler::AddConstant(Register rd, int32_t value, Condition cond) {
3132 AddConstant(rd, rd, value, cond);
3133}
3134
3135
// Materialize rd = rn + value, choosing the shortest encodable sequence:
// add/sub with an immediate operand, mvn+add/sub through IP, or a full
// movw/movt of the constant into IP followed by add.
void Thumb2Assembler::AddConstant(Register rd, Register rn, int32_t value,
                                  Condition cond) {
  if (value == 0) {
    if (rd != rn) {
      mov(rd, ShifterOperand(rn), cond);
    }
    return;
  }
  // We prefer to select the shorter code sequence rather than selecting add for
  // positive values and sub for negatives ones, which would slightly improve
  // the readability of generated code for some constants.
  ShifterOperand shifter_op;
  if (ShifterOperandCanHold(rd, rn, ADD, value, &shifter_op)) {
    add(rd, rn, shifter_op, cond);
  } else if (ShifterOperandCanHold(rd, rn, SUB, -value, &shifter_op)) {
    sub(rd, rn, shifter_op, cond);
  } else {
    // The remaining strategies all clobber IP as a scratch register.
    CHECK(rn != IP);
    if (ShifterOperandCanHold(rd, rn, MVN, ~value, &shifter_op)) {
      mvn(IP, shifter_op, cond);
      add(rd, rn, ShifterOperand(IP), cond);
    } else if (ShifterOperandCanHold(rd, rn, MVN, ~(-value), &shifter_op)) {
      mvn(IP, shifter_op, cond);
      sub(rd, rn, ShifterOperand(IP), cond);
    } else {
      // Fall back to loading the full 32-bit constant into IP.
      movw(IP, Low16Bits(value), cond);
      uint16_t value_high = High16Bits(value);
      if (value_high != 0) {
        movt(IP, value_high, cond);
      }
      add(rd, rn, ShifterOperand(IP), cond);
    }
  }
}
3170
3171
3172void Thumb2Assembler::AddConstantSetFlags(Register rd, Register rn, int32_t value,
3173 Condition cond) {
3174 ShifterOperand shifter_op;
Nicolas Geoffray3bcc8ea2014-11-28 15:00:02 +00003175 if (ShifterOperandCanHold(rd, rn, ADD, value, &shifter_op)) {
Dave Allison65fcc2c2014-04-28 13:45:27 -07003176 adds(rd, rn, shifter_op, cond);
Nicolas Geoffray3bcc8ea2014-11-28 15:00:02 +00003177 } else if (ShifterOperandCanHold(rd, rn, ADD, -value, &shifter_op)) {
Dave Allison65fcc2c2014-04-28 13:45:27 -07003178 subs(rd, rn, shifter_op, cond);
3179 } else {
3180 CHECK(rn != IP);
Nicolas Geoffray3bcc8ea2014-11-28 15:00:02 +00003181 if (ShifterOperandCanHold(rd, rn, MVN, ~value, &shifter_op)) {
Dave Allison65fcc2c2014-04-28 13:45:27 -07003182 mvn(IP, shifter_op, cond);
3183 adds(rd, rn, ShifterOperand(IP), cond);
Nicolas Geoffray3bcc8ea2014-11-28 15:00:02 +00003184 } else if (ShifterOperandCanHold(rd, rn, MVN, ~(-value), &shifter_op)) {
Dave Allison65fcc2c2014-04-28 13:45:27 -07003185 mvn(IP, shifter_op, cond);
3186 subs(rd, rn, ShifterOperand(IP), cond);
3187 } else {
3188 movw(IP, Low16Bits(value), cond);
3189 uint16_t value_high = High16Bits(value);
3190 if (value_high != 0) {
3191 movt(IP, value_high, cond);
3192 }
3193 adds(rd, rn, ShifterOperand(IP), cond);
3194 }
3195 }
3196}
3197
Nicolas Geoffray3bcc8ea2014-11-28 15:00:02 +00003198
Dave Allison65fcc2c2014-04-28 13:45:27 -07003199void Thumb2Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
3200 ShifterOperand shifter_op;
Nicolas Geoffray3bcc8ea2014-11-28 15:00:02 +00003201 if (ShifterOperandCanHold(rd, R0, MOV, value, &shifter_op)) {
Dave Allison65fcc2c2014-04-28 13:45:27 -07003202 mov(rd, shifter_op, cond);
Nicolas Geoffray3bcc8ea2014-11-28 15:00:02 +00003203 } else if (ShifterOperandCanHold(rd, R0, MVN, ~value, &shifter_op)) {
Dave Allison65fcc2c2014-04-28 13:45:27 -07003204 mvn(rd, shifter_op, cond);
3205 } else {
3206 movw(rd, Low16Bits(value), cond);
3207 uint16_t value_high = High16Bits(value);
3208 if (value_high != 0) {
3209 movt(rd, value_high, cond);
3210 }
3211 }
3212}
3213
Nicolas Geoffray3bcc8ea2014-11-28 15:00:02 +00003214
Dave Allison65fcc2c2014-04-28 13:45:27 -07003215// Implementation note: this method must emit at most one instruction when
3216// Address::CanHoldLoadOffsetThumb.
3217void Thumb2Assembler::LoadFromOffset(LoadOperandType type,
3218 Register reg,
3219 Register base,
3220 int32_t offset,
3221 Condition cond) {
3222 if (!Address::CanHoldLoadOffsetThumb(type, offset)) {
Roland Levillain775ef492014-11-04 17:43:11 +00003223 CHECK_NE(base, IP);
Dave Allison65fcc2c2014-04-28 13:45:27 -07003224 LoadImmediate(IP, offset, cond);
3225 add(IP, IP, ShifterOperand(base), cond);
3226 base = IP;
3227 offset = 0;
3228 }
3229 CHECK(Address::CanHoldLoadOffsetThumb(type, offset));
3230 switch (type) {
3231 case kLoadSignedByte:
3232 ldrsb(reg, Address(base, offset), cond);
3233 break;
3234 case kLoadUnsignedByte:
3235 ldrb(reg, Address(base, offset), cond);
3236 break;
3237 case kLoadSignedHalfword:
3238 ldrsh(reg, Address(base, offset), cond);
3239 break;
3240 case kLoadUnsignedHalfword:
3241 ldrh(reg, Address(base, offset), cond);
3242 break;
3243 case kLoadWord:
3244 ldr(reg, Address(base, offset), cond);
3245 break;
3246 case kLoadWordPair:
3247 ldrd(reg, Address(base, offset), cond);
3248 break;
3249 default:
3250 LOG(FATAL) << "UNREACHABLE";
Ian Rogers2c4257b2014-10-24 14:20:06 -07003251 UNREACHABLE();
Dave Allison65fcc2c2014-04-28 13:45:27 -07003252 }
3253}
3254
3255
// Implementation note: this method must emit at most one instruction when
// Address::CanHoldLoadOffsetThumb, as expected by JIT::GuardedLoadFromOffset.
// Load single-precision `reg` from [base + offset], using IP for offsets
// that vldrs cannot encode.
void Thumb2Assembler::LoadSFromOffset(SRegister reg,
                                      Register base,
                                      int32_t offset,
                                      Condition cond) {
  if (!Address::CanHoldLoadOffsetThumb(kLoadSWord, offset)) {
    CHECK_NE(base, IP);  // IP is about to be clobbered as the address scratch.
    LoadImmediate(IP, offset, cond);
    add(IP, IP, ShifterOperand(base), cond);
    base = IP;
    offset = 0;
  }
  CHECK(Address::CanHoldLoadOffsetThumb(kLoadSWord, offset));
  vldrs(reg, Address(base, offset), cond);
}
3272
3273
// Implementation note: this method must emit at most one instruction when
// Address::CanHoldLoadOffsetThumb, as expected by JIT::GuardedLoadFromOffset.
// Load double-precision `reg` from [base + offset], using IP for offsets
// that vldrd cannot encode.
void Thumb2Assembler::LoadDFromOffset(DRegister reg,
                                      Register base,
                                      int32_t offset,
                                      Condition cond) {
  if (!Address::CanHoldLoadOffsetThumb(kLoadDWord, offset)) {
    CHECK_NE(base, IP);  // IP is about to be clobbered as the address scratch.
    LoadImmediate(IP, offset, cond);
    add(IP, IP, ShifterOperand(base), cond);
    base = IP;
    offset = 0;
  }
  CHECK(Address::CanHoldLoadOffsetThumb(kLoadDWord, offset));
  vldrd(reg, Address(base, offset), cond);
}
3290
3291
// Implementation note: this method must emit at most one instruction when
// Address::CanHoldStoreOffsetThumb.
// Store `reg` to [base + offset]. Unencodable offsets are rewritten through a
// scratch register: IP when it does not alias the data being stored,
// otherwise a callee-saved register (R5/R6) that is spilled around the store.
void Thumb2Assembler::StoreToOffset(StoreOperandType type,
                                    Register reg,
                                    Register base,
                                    int32_t offset,
                                    Condition cond) {
  Register tmp_reg = kNoRegister;
  if (!Address::CanHoldStoreOffsetThumb(type, offset)) {
    CHECK_NE(base, IP);
    // IP can serve as the scratch only if it is not the data register
    // (nor the second register of a word-pair store).
    if (reg != IP &&
        (type != kStoreWordPair || reg + 1 != IP)) {
      tmp_reg = IP;
    } else {
      // Be careful not to use IP twice (for `reg` (or `reg` + 1 in
      // the case of a word-pair store)) and to build the Address
      // object used by the store instruction(s) below). Instead,
      // save R5 on the stack (or R6 if R5 is not available), use it
      // as secondary temporary register, and restore it after the
      // store instruction has been emitted.
      tmp_reg = base != R5 ? R5 : R6;
      Push(tmp_reg);
      if (base == SP) {
        // The push just moved SP down one slot; compensate the offset.
        offset += kRegisterSize;
      }
    }
    LoadImmediate(tmp_reg, offset, cond);
    add(tmp_reg, tmp_reg, ShifterOperand(base), cond);
    base = tmp_reg;
    offset = 0;
  }
  CHECK(Address::CanHoldStoreOffsetThumb(type, offset));
  switch (type) {
    case kStoreByte:
      strb(reg, Address(base, offset), cond);
      break;
    case kStoreHalfword:
      strh(reg, Address(base, offset), cond);
      break;
    case kStoreWord:
      str(reg, Address(base, offset), cond);
      break;
    case kStoreWordPair:
      strd(reg, Address(base, offset), cond);
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
  // Restore the spilled secondary scratch register, if one was used.
  if (tmp_reg != kNoRegister && tmp_reg != IP) {
    DCHECK(tmp_reg == R5 || tmp_reg == R6);
    Pop(tmp_reg);
  }
}
3346
3347
3348// Implementation note: this method must emit at most one instruction when
3349// Address::CanHoldStoreOffsetThumb, as expected by JIT::GuardedStoreToOffset.
3350void Thumb2Assembler::StoreSToOffset(SRegister reg,
3351 Register base,
3352 int32_t offset,
3353 Condition cond) {
3354 if (!Address::CanHoldStoreOffsetThumb(kStoreSWord, offset)) {
3355 CHECK_NE(base, IP);
3356 LoadImmediate(IP, offset, cond);
3357 add(IP, IP, ShifterOperand(base), cond);
3358 base = IP;
3359 offset = 0;
3360 }
3361 CHECK(Address::CanHoldStoreOffsetThumb(kStoreSWord, offset));
3362 vstrs(reg, Address(base, offset), cond);
3363}
3364
3365
3366// Implementation note: this method must emit at most one instruction when
3367// Address::CanHoldStoreOffsetThumb, as expected by JIT::GuardedStoreSToOffset.
3368void Thumb2Assembler::StoreDToOffset(DRegister reg,
3369 Register base,
3370 int32_t offset,
3371 Condition cond) {
3372 if (!Address::CanHoldStoreOffsetThumb(kStoreDWord, offset)) {
3373 CHECK_NE(base, IP);
3374 LoadImmediate(IP, offset, cond);
3375 add(IP, IP, ShifterOperand(base), cond);
3376 base = IP;
3377 offset = 0;
3378 }
3379 CHECK(Address::CanHoldStoreOffsetThumb(kStoreDWord, offset));
3380 vstrd(reg, Address(base, offset), cond);
3381}
3382
3383
3384void Thumb2Assembler::MemoryBarrier(ManagedRegister mscratch) {
3385 CHECK_EQ(mscratch.AsArm().AsCoreRegister(), R12);
Nicolas Geoffray19a19cf2014-10-22 16:07:05 +01003386 dmb(SY);
3387}
3388
3389
3390void Thumb2Assembler::dmb(DmbOptions flavor) {
Nicolas Geoffray19a19cf2014-10-22 16:07:05 +01003391 int32_t encoding = 0xf3bf8f50; // dmb in T1 encoding.
3392 Emit32(encoding | flavor);
Dave Allison65fcc2c2014-04-28 13:45:27 -07003393}
3394
3395
3396void Thumb2Assembler::CompareAndBranchIfZero(Register r, Label* label) {
Nicolas Geoffray2bcb4312015-07-01 12:22:56 +01003397 if (CanRelocateBranches() && IsLowRegister(r) && !label->IsBound()) {
Nicolas Geoffrayd56376c2015-05-21 12:32:34 +00003398 cbz(r, label);
3399 } else {
3400 cmp(r, ShifterOperand(0));
3401 b(label, EQ);
3402 }
3403}
3404
3405
Dave Allison65fcc2c2014-04-28 13:45:27 -07003406void Thumb2Assembler::CompareAndBranchIfNonZero(Register r, Label* label) {
Nicolas Geoffray2bcb4312015-07-01 12:22:56 +01003407 if (CanRelocateBranches() && IsLowRegister(r) && !label->IsBound()) {
Nicolas Geoffrayd126ba12015-05-20 11:25:27 +01003408 cbnz(r, label);
3409 } else {
Nicolas Geoffray1a43dd72014-07-17 15:15:34 +01003410 cmp(r, ShifterOperand(0));
3411 b(label, NE);
Nicolas Geoffray1a43dd72014-07-17 15:15:34 +01003412 }
Dave Allison65fcc2c2014-04-28 13:45:27 -07003413}
3414} // namespace arm
3415} // namespace art