/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "assembler_mips64.h"

#include "base/bit_utils.h"
#include "base/casts.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "memory_region.h"
#include "thread.h"

namespace art {
namespace mips64 {

void Mips64Assembler::Emit(int32_t value) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  buffer_.Emit<int32_t>(value);
}

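// R-type (register) encoding, per the standard MIPS layout:
//   opcode(31..26) | rs(25..21) | rt(20..16) | rd(15..11) | shamt(10..6) | funct(5..0)
// e.g. addu $t0, $t1, $t2 (rd=8, rs=9, rt=10) encodes as
//   0x012A4021 = 0 << 26 | 9 << 21 | 10 << 16 | 8 << 11 | 0 << 6 | 0x21.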
void Mips64Assembler::EmitR(int opcode, GpuRegister rs, GpuRegister rt, GpuRegister rd,
                            int shamt, int funct) {
  CHECK_NE(rs, kNoGpuRegister);
  CHECK_NE(rt, kNoGpuRegister);
  CHECK_NE(rd, kNoGpuRegister);
  int32_t encoding = opcode << kOpcodeShift |
                     static_cast<int32_t>(rs) << kRsShift |
                     static_cast<int32_t>(rt) << kRtShift |
                     static_cast<int32_t>(rd) << kRdShift |
                     shamt << kShamtShift |
                     funct;
  Emit(encoding);
}

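// I-type (immediate) encoding:
//   opcode(31..26) | rs(25..21) | rt(20..16) | imm(15..0)
// e.g. addiu $t0, $t1, 4 encodes as 0x25280004 = 0x9 << 26 | 9 << 21 | 8 << 16 | 4.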
void Mips64Assembler::EmitI(int opcode, GpuRegister rs, GpuRegister rt, uint16_t imm) {
  CHECK_NE(rs, kNoGpuRegister);
  CHECK_NE(rt, kNoGpuRegister);
  int32_t encoding = opcode << kOpcodeShift |
                     static_cast<int32_t>(rs) << kRsShift |
                     static_cast<int32_t>(rt) << kRtShift |
                     imm;
  Emit(encoding);
}

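// J-type (jump) encoding:
//   opcode(31..26) | target(25..0)
// The target field holds an instruction (word) index, so callers shift byte
// offsets right by 2 before masking them into this field.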
void Mips64Assembler::EmitJ(int opcode, int address) {
  int32_t encoding = opcode << kOpcodeShift |
                     address;
  Emit(encoding);
}

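// Floating-point (COP1) encodings use opcode 0x11 with a format field in the
// rs position: fmt 0x10 selects single precision (.S), 0x11 double precision (.D).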
void Mips64Assembler::EmitFR(int opcode, int fmt, FpuRegister ft, FpuRegister fs, FpuRegister fd,
                             int funct) {
  CHECK_NE(ft, kNoFpuRegister);
  CHECK_NE(fs, kNoFpuRegister);
  CHECK_NE(fd, kNoFpuRegister);
  int32_t encoding = opcode << kOpcodeShift |
                     fmt << kFmtShift |
                     static_cast<int32_t>(ft) << kFtShift |
                     static_cast<int32_t>(fs) << kFsShift |
                     static_cast<int32_t>(fd) << kFdShift |
                     funct;
  Emit(encoding);
}

void Mips64Assembler::EmitFI(int opcode, int fmt, FpuRegister rt, uint16_t imm) {
  CHECK_NE(rt, kNoFpuRegister);
  int32_t encoding = opcode << kOpcodeShift |
                     fmt << kFmtShift |
                     static_cast<int32_t>(rt) << kRtShift |
                     imm;
  Emit(encoding);
}

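// Unbound labels form a singly linked list of branch sites: each unresolved
// instruction stores the position of the previous site in its offset field,
// and the label records the most recent site. Bind() later walks this chain
// and patches in the real offsets.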
void Mips64Assembler::EmitBranch(GpuRegister rt, GpuRegister rs, Label* label, bool equal) {
  int offset;
  if (label->IsBound()) {
    offset = label->Position() - buffer_.Size();
  } else {
    // Use the offset field of the branch instruction for linking the sites.
    offset = label->position_;
    label->LinkTo(buffer_.Size());
  }
  if (equal) {
    Beq(rt, rs, (offset >> 2) & kBranchOffsetMask);
  } else {
    Bne(rt, rs, (offset >> 2) & kBranchOffsetMask);
  }
}

void Mips64Assembler::EmitJump(Label* label, bool link) {
  int offset;
  if (label->IsBound()) {
    offset = label->Position() - buffer_.Size();
  } else {
    // Use the offset field of the jump instruction for linking the sites.
    offset = label->position_;
    label->LinkTo(buffer_.Size());
  }
  if (link) {
    Jal((offset >> 2) & kJumpOffsetMask);
  } else {
    J((offset >> 2) & kJumpOffsetMask);
  }
}

int32_t Mips64Assembler::EncodeBranchOffset(int offset, int32_t inst, bool is_jump) {
  CHECK_ALIGNED(offset, 4);
  CHECK(IsInt<POPCOUNT(kBranchOffsetMask)>(offset)) << offset;

  // Properly preserve only the bits supported in the instruction.
  offset >>= 2;
  if (is_jump) {
    offset &= kJumpOffsetMask;
    return (inst & ~kJumpOffsetMask) | offset;
  } else {
    offset &= kBranchOffsetMask;
    return (inst & ~kBranchOffsetMask) | offset;
  }
}

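// The shift pairs below sign-extend the offset field and multiply it by 4 in
// one step. For the 16-bit branch case, (offset << 16) >> 14 arithmetic-shifts
// the sign bit back down: e.g. a field of 0x8000 becomes
// (0x8000 << 16) >> 14 = 0xFFFE0000 = -131072 = -32768 * 4.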
int Mips64Assembler::DecodeBranchOffset(int32_t inst, bool is_jump) {
  // Sign-extend, then left-shift by 2.
  if (is_jump) {
    return (((inst & kJumpOffsetMask) << 6) >> 4);
  } else {
    return (((inst & kBranchOffsetMask) << 16) >> 14);
  }
}

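// Binding resolves the chain built by EmitBranch/EmitJump: each iteration
// patches one linked site with the now-known offset, then follows the previous
// link recovered from that site's offset field.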
void Mips64Assembler::Bind(Label* label, bool is_jump) {
  CHECK(!label->IsBound());
  int bound_pc = buffer_.Size();
  while (label->IsLinked()) {
    int32_t position = label->Position();
    int32_t next = buffer_.Load<int32_t>(position);
    int32_t offset = is_jump ? bound_pc - position : bound_pc - position - 4;
    int32_t encoded = Mips64Assembler::EncodeBranchOffset(offset, next, is_jump);
    buffer_.Store<int32_t>(position, encoded);
    label->position_ = Mips64Assembler::DecodeBranchOffset(next, is_jump);
  }
  label->BindTo(bound_pc);
}

void Mips64Assembler::Add(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x20);
}

void Mips64Assembler::Addi(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x8, rs, rt, imm16);
}

void Mips64Assembler::Addu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x21);
}

void Mips64Assembler::Addiu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x9, rs, rt, imm16);
}

void Mips64Assembler::Daddiu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x19, rs, rt, imm16);
}

void Mips64Assembler::Sub(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x22);
}

void Mips64Assembler::Subu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x23);
}

void Mips64Assembler::Mult(GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, static_cast<GpuRegister>(0), 0, 0x18);
}

void Mips64Assembler::Multu(GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, static_cast<GpuRegister>(0), 0, 0x19);
}

void Mips64Assembler::Div(GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, static_cast<GpuRegister>(0), 0, 0x1a);
}

void Mips64Assembler::Divu(GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, static_cast<GpuRegister>(0), 0, 0x1b);
}

void Mips64Assembler::And(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x24);
}

void Mips64Assembler::Andi(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0xc, rs, rt, imm16);
}

void Mips64Assembler::Or(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x25);
}

void Mips64Assembler::Ori(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0xd, rs, rt, imm16);
}

void Mips64Assembler::Xor(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x26);
}

void Mips64Assembler::Xori(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0xe, rs, rt, imm16);
}

void Mips64Assembler::Nor(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x27);
}

void Mips64Assembler::Sll(GpuRegister rd, GpuRegister rs, int shamt) {
  EmitR(0, rs, static_cast<GpuRegister>(0), rd, shamt, 0x00);
}

void Mips64Assembler::Srl(GpuRegister rd, GpuRegister rs, int shamt) {
  EmitR(0, rs, static_cast<GpuRegister>(0), rd, shamt, 0x02);
}

void Mips64Assembler::Sra(GpuRegister rd, GpuRegister rs, int shamt) {
  EmitR(0, rs, static_cast<GpuRegister>(0), rd, shamt, 0x03);
}

void Mips64Assembler::Sllv(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x04);
}

void Mips64Assembler::Srlv(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x06);
}

void Mips64Assembler::Srav(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x07);
}

void Mips64Assembler::Lb(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x20, rs, rt, imm16);
}

void Mips64Assembler::Lh(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x21, rs, rt, imm16);
}

void Mips64Assembler::Lw(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x23, rs, rt, imm16);
}

void Mips64Assembler::Ld(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x37, rs, rt, imm16);
}

void Mips64Assembler::Lbu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x24, rs, rt, imm16);
}

void Mips64Assembler::Lhu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x25, rs, rt, imm16);
}

void Mips64Assembler::Lwu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x27, rs, rt, imm16);
}

void Mips64Assembler::Lui(GpuRegister rt, uint16_t imm16) {
  EmitI(0xf, static_cast<GpuRegister>(0), rt, imm16);
}

void Mips64Assembler::Mfhi(GpuRegister rd) {
  EmitR(0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0), rd, 0, 0x10);
}

void Mips64Assembler::Mflo(GpuRegister rd) {
  EmitR(0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0), rd, 0, 0x12);
}

void Mips64Assembler::Sb(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x28, rs, rt, imm16);
}

void Mips64Assembler::Sh(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x29, rs, rt, imm16);
}

void Mips64Assembler::Sw(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x2b, rs, rt, imm16);
}

void Mips64Assembler::Sd(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x3f, rs, rt, imm16);
}

void Mips64Assembler::Slt(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x2a);
}

void Mips64Assembler::Sltu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x2b);
}

void Mips64Assembler::Slti(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0xa, rs, rt, imm16);
}

void Mips64Assembler::Sltiu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0xb, rs, rt, imm16);
}

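// MIPS branches and jumps have a one-instruction delay slot. The helpers below
// conservatively fill it with a Nop() so callers never need to schedule it.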
void Mips64Assembler::Beq(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x4, rs, rt, imm16);
  Nop();
}

void Mips64Assembler::Bne(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x5, rs, rt, imm16);
  Nop();
}

void Mips64Assembler::J(uint32_t address) {
  EmitJ(0x2, address);
  Nop();
}

void Mips64Assembler::Jal(uint32_t address) {
  EmitJ(0x3, address);  // Opcode 0x3 is JAL; 0x2 would encode a plain J without linking.
  Nop();
}

void Mips64Assembler::Jr(GpuRegister rs) {
  EmitR(0, rs, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0), 0, 0x09);  // Jalr zero, rs
  Nop();
}

void Mips64Assembler::Jalr(GpuRegister rs) {
  EmitR(0, rs, static_cast<GpuRegister>(0), RA, 0, 0x09);
  Nop();
}

void Mips64Assembler::AddS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x0);
}

void Mips64Assembler::SubS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x1);
}

void Mips64Assembler::MulS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x2);
}

void Mips64Assembler::DivS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x3);
}

void Mips64Assembler::AddD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(ft), static_cast<FpuRegister>(fs),
         static_cast<FpuRegister>(fd), 0x0);
}

void Mips64Assembler::SubD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(ft), static_cast<FpuRegister>(fs),
         static_cast<FpuRegister>(fd), 0x1);
}

void Mips64Assembler::MulD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(ft), static_cast<FpuRegister>(fs),
         static_cast<FpuRegister>(fd), 0x2);
}

void Mips64Assembler::DivD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(ft), static_cast<FpuRegister>(fs),
         static_cast<FpuRegister>(fd), 0x3);
}

void Mips64Assembler::MovS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x6);
}

void Mips64Assembler::MovD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), static_cast<FpuRegister>(fs),
         static_cast<FpuRegister>(fd), 0x6);
}

void Mips64Assembler::Mfc1(GpuRegister rt, FpuRegister fs) {
  EmitFR(0x11, 0x00, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
}

void Mips64Assembler::Mtc1(FpuRegister ft, GpuRegister rs) {
  // MTC1 takes the GPR in the rt field and the FPR in the fs field, as in Mfc1 above.
  EmitFR(0x11, 0x04, static_cast<FpuRegister>(rs), ft, static_cast<FpuRegister>(0), 0x0);
}

void Mips64Assembler::Lwc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
  EmitI(0x31, rs, static_cast<GpuRegister>(ft), imm16);
}

void Mips64Assembler::Ldc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
  EmitI(0x35, rs, static_cast<GpuRegister>(ft), imm16);
}

void Mips64Assembler::Swc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
  EmitI(0x39, rs, static_cast<GpuRegister>(ft), imm16);
}

void Mips64Assembler::Sdc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
  EmitI(0x3d, rs, static_cast<GpuRegister>(ft), imm16);
}

void Mips64Assembler::Break() {
  EmitR(0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0),
        static_cast<GpuRegister>(0), 0, 0xD);
}

void Mips64Assembler::Nop() {
  EmitR(0x0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0),
        static_cast<GpuRegister>(0), 0, 0x0);
}

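// The helpers below are pseudo-instructions built from the base ISA:
// Move is daddiu rt, rs, 0; Clear is add rt, zero, zero; Not is nor rt, zero, rs;
// and Mul/Div/Rem expand to mult/div followed by mflo/mfhi.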
void Mips64Assembler::Move(GpuRegister rt, GpuRegister rs) {
  EmitI(0x19, rs, rt, 0);  // Daddiu
}

void Mips64Assembler::Clear(GpuRegister rt) {
  EmitR(0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0), rt, 0, 0x20);
}

void Mips64Assembler::Not(GpuRegister rt, GpuRegister rs) {
  EmitR(0, static_cast<GpuRegister>(0), rs, rt, 0, 0x27);
}

void Mips64Assembler::Mul(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  Mult(rs, rt);
  Mflo(rd);
}

void Mips64Assembler::Div(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  Div(rs, rt);
  Mflo(rd);
}

void Mips64Assembler::Rem(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  Div(rs, rt);
  Mfhi(rd);
}

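// AddConstant64/LoadImmediate64 are currently limited to values that fit the
// 16-bit signed immediate field of daddiu; larger constants would need e.g. a
// lui/ori sequence, which these helpers do not yet emit.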
void Mips64Assembler::AddConstant64(GpuRegister rt, GpuRegister rs, int32_t value) {
  CHECK((value >= -32768) && (value <= 32766));
  Daddiu(rt, rs, value);
}

void Mips64Assembler::LoadImmediate64(GpuRegister rt, int32_t value) {
  CHECK((value >= -32768) && (value <= 32766));
  Daddiu(rt, ZERO, value);
}

void Mips64Assembler::LoadFromOffset(LoadOperandType type, GpuRegister reg, GpuRegister base,
                                     int32_t offset) {
  switch (type) {
    case kLoadSignedByte:
      Lb(reg, base, offset);
      break;
    case kLoadUnsignedByte:
      Lbu(reg, base, offset);
      break;
    case kLoadSignedHalfword:
      Lh(reg, base, offset);
      break;
    case kLoadUnsignedHalfword:
      Lhu(reg, base, offset);
      break;
    case kLoadWord:
      Lw(reg, base, offset);
      break;
    case kLoadUnsignedWord:
      Lwu(reg, base, offset);
      break;
    case kLoadDoubleword:
      // TODO: alignment issues ???
      Ld(reg, base, offset);
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

void Mips64Assembler::LoadFpuFromOffset(LoadOperandType type, FpuRegister reg, GpuRegister base,
                                        int32_t offset) {
  CHECK((offset >= -32768) && (offset <= 32766));
  switch (type) {
    case kLoadWord:
      Lwc1(reg, base, offset);
      break;
    case kLoadDoubleword:
      // TODO: alignment issues ???
      Ldc1(reg, base, offset);
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

void Mips64Assembler::EmitLoad(ManagedRegister m_dst, GpuRegister src_register, int32_t src_offset,
                               size_t size) {
  Mips64ManagedRegister dst = m_dst.AsMips64();
  if (dst.IsNoRegister()) {
    CHECK_EQ(0u, size) << dst;
  } else if (dst.IsGpuRegister()) {
    if (size == 4) {
      LoadFromOffset(kLoadWord, dst.AsGpuRegister(), src_register, src_offset);
    } else if (size == 8) {
      CHECK_EQ(8u, size) << dst;
      LoadFromOffset(kLoadDoubleword, dst.AsGpuRegister(), src_register, src_offset);
    } else {
      UNIMPLEMENTED(FATAL) << "We only support Load() of size 4 and 8";
    }
  } else if (dst.IsFpuRegister()) {
    if (size == 4) {
      CHECK_EQ(4u, size) << dst;
      LoadFpuFromOffset(kLoadWord, dst.AsFpuRegister(), src_register, src_offset);
    } else if (size == 8) {
      CHECK_EQ(8u, size) << dst;
      LoadFpuFromOffset(kLoadDoubleword, dst.AsFpuRegister(), src_register, src_offset);
    } else {
      UNIMPLEMENTED(FATAL) << "We only support Load() of size 4 and 8";
    }
  }
}

void Mips64Assembler::StoreToOffset(StoreOperandType type, GpuRegister reg, GpuRegister base,
                                    int32_t offset) {
  switch (type) {
    case kStoreByte:
      Sb(reg, base, offset);
      break;
    case kStoreHalfword:
      Sh(reg, base, offset);
      break;
    case kStoreWord:
      Sw(reg, base, offset);
      break;
    case kStoreDoubleword:
      // TODO: alignment issues ???
      Sd(reg, base, offset);
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

void Mips64Assembler::StoreFpuToOffset(StoreOperandType type, FpuRegister reg, GpuRegister base,
                                       int32_t offset) {
  switch (type) {
    case kStoreWord:
      Swc1(reg, base, offset);
      break;
    case kStoreDoubleword:
      Sdc1(reg, base, offset);
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

static dwarf::Reg DWARFReg(GpuRegister reg) {
  return dwarf::Reg::Mips64Core(static_cast<int>(reg));
}

constexpr size_t kFramePointerSize = 8;

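// Frame layout produced by BuildFrame (stack grows downward):
//   [SP + frame_size - 8]  RA
//   [...]                  callee-save registers, in reverse order
//   [SP + 0]               StackReference<ArtMethod> (stored as a 32-bit word)
// Entry spills land just above the frame, starting at
// SP + frame_size + sizeof(StackReference<mirror::ArtMethod>).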
void Mips64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
                                 const std::vector<ManagedRegister>& callee_save_regs,
                                 const ManagedRegisterEntrySpills& entry_spills) {
  CHECK_ALIGNED(frame_size, kStackAlignment);

  // Increase frame to required size.
  IncreaseFrameSize(frame_size);

  // Push callee saves and return address.
  int stack_offset = frame_size - kFramePointerSize;
  StoreToOffset(kStoreDoubleword, RA, SP, stack_offset);
  cfi_.RelOffset(DWARFReg(RA), stack_offset);
  for (int i = callee_save_regs.size() - 1; i >= 0; --i) {
    stack_offset -= kFramePointerSize;
    GpuRegister reg = callee_save_regs.at(i).AsMips64().AsGpuRegister();
    StoreToOffset(kStoreDoubleword, reg, SP, stack_offset);
    cfi_.RelOffset(DWARFReg(reg), stack_offset);
  }

  // Write out Method*.
  StoreToOffset(kStoreWord, method_reg.AsMips64().AsGpuRegister(), SP, 0);

  // Write out entry spills.
  int32_t offset = frame_size + sizeof(StackReference<mirror::ArtMethod>);
  for (size_t i = 0; i < entry_spills.size(); ++i) {
    Mips64ManagedRegister reg = entry_spills.at(i).AsMips64();
    ManagedRegisterSpill spill = entry_spills.at(i);
    int32_t size = spill.getSize();
    if (reg.IsNoRegister()) {
      // Only increment stack offset.
      offset += size;
    } else if (reg.IsFpuRegister()) {
      StoreFpuToOffset((size == 4) ? kStoreWord : kStoreDoubleword, reg.AsFpuRegister(), SP, offset);
      offset += size;
    } else if (reg.IsGpuRegister()) {
      StoreToOffset((size == 4) ? kStoreWord : kStoreDoubleword, reg.AsGpuRegister(), SP, offset);
      offset += size;
    }
  }
}

void Mips64Assembler::RemoveFrame(size_t frame_size,
                                  const std::vector<ManagedRegister>& callee_save_regs) {
  CHECK_ALIGNED(frame_size, kStackAlignment);
  cfi_.RememberState();

  // Pop callee saves and return address.
  int stack_offset = frame_size - (callee_save_regs.size() * kFramePointerSize) - kFramePointerSize;
  for (size_t i = 0; i < callee_save_regs.size(); ++i) {
    GpuRegister reg = callee_save_regs.at(i).AsMips64().AsGpuRegister();
    LoadFromOffset(kLoadDoubleword, reg, SP, stack_offset);
    cfi_.Restore(DWARFReg(reg));
    stack_offset += kFramePointerSize;
  }
  LoadFromOffset(kLoadDoubleword, RA, SP, stack_offset);
  cfi_.Restore(DWARFReg(RA));

  // Decrease frame to required size.
  DecreaseFrameSize(frame_size);

  // Then jump to the return address.
  Jr(RA);

  // The CFI should be restored for any code that follows the exit block.
  cfi_.RestoreState();
  cfi_.DefCFAOffset(frame_size);
}

void Mips64Assembler::IncreaseFrameSize(size_t adjust) {
  CHECK_ALIGNED(adjust, kStackAlignment);
  AddConstant64(SP, SP, -adjust);
  cfi_.AdjustCFAOffset(adjust);
}

void Mips64Assembler::DecreaseFrameSize(size_t adjust) {
  CHECK_ALIGNED(adjust, kStackAlignment);
  AddConstant64(SP, SP, adjust);
  cfi_.AdjustCFAOffset(-adjust);
}

void Mips64Assembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
  Mips64ManagedRegister src = msrc.AsMips64();
  if (src.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (src.IsGpuRegister()) {
    CHECK(size == 4 || size == 8) << size;
    if (size == 8) {
      StoreToOffset(kStoreDoubleword, src.AsGpuRegister(), SP, dest.Int32Value());
    } else if (size == 4) {
      StoreToOffset(kStoreWord, src.AsGpuRegister(), SP, dest.Int32Value());
    } else {
      UNIMPLEMENTED(FATAL) << "We only support Store() of size 4 and 8";
    }
  } else if (src.IsFpuRegister()) {
    CHECK(size == 4 || size == 8) << size;
    if (size == 8) {
      StoreFpuToOffset(kStoreDoubleword, src.AsFpuRegister(), SP, dest.Int32Value());
    } else if (size == 4) {
      StoreFpuToOffset(kStoreWord, src.AsFpuRegister(), SP, dest.Int32Value());
    } else {
      UNIMPLEMENTED(FATAL) << "We only support Store() of size 4 and 8";
    }
  }
}

void Mips64Assembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
  Mips64ManagedRegister src = msrc.AsMips64();
  CHECK(src.IsGpuRegister());
  StoreToOffset(kStoreWord, src.AsGpuRegister(), SP, dest.Int32Value());
}

void Mips64Assembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
  Mips64ManagedRegister src = msrc.AsMips64();
  CHECK(src.IsGpuRegister());
  StoreToOffset(kStoreDoubleword, src.AsGpuRegister(), SP, dest.Int32Value());
}

void Mips64Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
                                            ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  LoadImmediate64(scratch.AsGpuRegister(), imm);
  StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
}

void Mips64Assembler::StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm,
                                               ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  LoadImmediate64(scratch.AsGpuRegister(), imm);
  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), S1, dest.Int32Value());
}

void Mips64Assembler::StoreStackOffsetToThread64(ThreadOffset<8> thr_offs,
                                                 FrameOffset fr_offs,
                                                 ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  AddConstant64(scratch.AsGpuRegister(), SP, fr_offs.Int32Value());
  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), S1, thr_offs.Int32Value());
}

void Mips64Assembler::StoreStackPointerToThread64(ThreadOffset<8> thr_offs) {
  StoreToOffset(kStoreDoubleword, SP, S1, thr_offs.Int32Value());
}

void Mips64Assembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc,
                                    FrameOffset in_off, ManagedRegister mscratch) {
  Mips64ManagedRegister src = msrc.AsMips64();
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  StoreToOffset(kStoreDoubleword, src.AsGpuRegister(), SP, dest.Int32Value());
  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), SP, in_off.Int32Value());
  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, dest.Int32Value() + 8);
}

void Mips64Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
  return EmitLoad(mdest, SP, src.Int32Value(), size);
}

void Mips64Assembler::LoadFromThread64(ManagedRegister mdest, ThreadOffset<8> src, size_t size) {
  return EmitLoad(mdest, S1, src.Int32Value(), size);
}

void Mips64Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
  Mips64ManagedRegister dest = mdest.AsMips64();
  CHECK(dest.IsGpuRegister());
  LoadFromOffset(kLoadUnsignedWord, dest.AsGpuRegister(), SP, src.Int32Value());
}

void Mips64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs) {
  Mips64ManagedRegister dest = mdest.AsMips64();
  CHECK(dest.IsGpuRegister() && base.AsMips64().IsGpuRegister());
  LoadFromOffset(kLoadUnsignedWord, dest.AsGpuRegister(),
                 base.AsMips64().AsGpuRegister(), offs.Int32Value());
  if (kPoisonHeapReferences) {
    Subu(dest.AsGpuRegister(), ZERO, dest.AsGpuRegister());
  }
}

void Mips64Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
                                 Offset offs) {
  Mips64ManagedRegister dest = mdest.AsMips64();
  CHECK(dest.IsGpuRegister() && base.AsMips64().IsGpuRegister()) << dest;
  LoadFromOffset(kLoadDoubleword, dest.AsGpuRegister(),
                 base.AsMips64().AsGpuRegister(), offs.Int32Value());
}

void Mips64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest,
                                             ThreadOffset<8> offs) {
  Mips64ManagedRegister dest = mdest.AsMips64();
  CHECK(dest.IsGpuRegister());
  LoadFromOffset(kLoadDoubleword, dest.AsGpuRegister(), S1, offs.Int32Value());
}

void Mips64Assembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "no sign extension necessary for mips64";
}

void Mips64Assembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "no zero extension necessary for mips64";
}

void Mips64Assembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
  Mips64ManagedRegister dest = mdest.AsMips64();
  Mips64ManagedRegister src = msrc.AsMips64();
  if (!dest.Equals(src)) {
    if (dest.IsGpuRegister()) {
      CHECK(src.IsGpuRegister()) << src;
      Move(dest.AsGpuRegister(), src.AsGpuRegister());
    } else if (dest.IsFpuRegister()) {
      CHECK(src.IsFpuRegister()) << src;
      if (size == 4) {
        MovS(dest.AsFpuRegister(), src.AsFpuRegister());
      } else if (size == 8) {
        MovD(dest.AsFpuRegister(), src.AsFpuRegister());
      } else {
        UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
      }
    }
  }
}

void Mips64Assembler::CopyRef(FrameOffset dest, FrameOffset src,
                              ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  LoadFromOffset(kLoadWord, scratch.AsGpuRegister(), SP, src.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
}

void Mips64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
                                             ThreadOffset<8> thr_offs,
                                             ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), S1, thr_offs.Int32Value());
  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, fr_offs.Int32Value());
}

void Mips64Assembler::CopyRawPtrToThread64(ThreadOffset<8> thr_offs,
                                           FrameOffset fr_offs,
                                           ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
                 SP, fr_offs.Int32Value());
  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(),
                S1, thr_offs.Int32Value());
}

void Mips64Assembler::Copy(FrameOffset dest, FrameOffset src,
                           ManagedRegister mscratch, size_t size) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadFromOffset(kLoadWord, scratch.AsGpuRegister(), SP, src.Int32Value());
    StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), SP, src.Int32Value());
    StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, dest.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Mips64Assembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
                           ManagedRegister mscratch, size_t size) {
  GpuRegister scratch = mscratch.AsMips64().AsGpuRegister();
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadFromOffset(kLoadWord, scratch, src_base.AsMips64().AsGpuRegister(),
                   src_offset.Int32Value());
    StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(kLoadDoubleword, scratch, src_base.AsMips64().AsGpuRegister(),
                   src_offset.Int32Value());
    StoreToOffset(kStoreDoubleword, scratch, SP, dest.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Mips64Assembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
                           ManagedRegister mscratch, size_t size) {
  GpuRegister scratch = mscratch.AsMips64().AsGpuRegister();
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
    StoreToOffset(kStoreWord, scratch, dest_base.AsMips64().AsGpuRegister(),
                  dest_offset.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(kLoadDoubleword, scratch, SP, src.Int32Value());
    StoreToOffset(kStoreDoubleword, scratch, dest_base.AsMips64().AsGpuRegister(),
                  dest_offset.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Mips64Assembler::Copy(FrameOffset /*dest*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
                           ManagedRegister /*mscratch*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "no mips64 implementation";
}

void Mips64Assembler::Copy(ManagedRegister dest, Offset dest_offset,
                           ManagedRegister src, Offset src_offset,
                           ManagedRegister mscratch, size_t size) {
  GpuRegister scratch = mscratch.AsMips64().AsGpuRegister();
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadFromOffset(kLoadWord, scratch, src.AsMips64().AsGpuRegister(), src_offset.Int32Value());
    StoreToOffset(kStoreWord, scratch, dest.AsMips64().AsGpuRegister(), dest_offset.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(kLoadDoubleword, scratch, src.AsMips64().AsGpuRegister(),
                   src_offset.Int32Value());
    StoreToOffset(kStoreDoubleword, scratch, dest.AsMips64().AsGpuRegister(),
                  dest_offset.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Mips64Assembler::Copy(FrameOffset /*dest*/, Offset /*dest_offset*/,
                           FrameOffset /*src*/, Offset /*src_offset*/,
                           ManagedRegister /*mscratch*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "no mips64 implementation";
}

void Mips64Assembler::MemoryBarrier(ManagedRegister) {
  UNIMPLEMENTED(FATAL) << "no mips64 implementation";
}

void Mips64Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
                                             FrameOffset handle_scope_offset,
                                             ManagedRegister min_reg, bool null_allowed) {
  Mips64ManagedRegister out_reg = mout_reg.AsMips64();
  Mips64ManagedRegister in_reg = min_reg.AsMips64();
  CHECK(in_reg.IsNoRegister() || in_reg.IsGpuRegister()) << in_reg;
  CHECK(out_reg.IsGpuRegister()) << out_reg;
  if (null_allowed) {
    Label null_arg;
    // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
    // the address in the handle scope holding the reference.
    // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
    if (in_reg.IsNoRegister()) {
      LoadFromOffset(kLoadUnsignedWord, out_reg.AsGpuRegister(),
                     SP, handle_scope_offset.Int32Value());
      in_reg = out_reg;
    }
    if (!out_reg.Equals(in_reg)) {
      LoadImmediate64(out_reg.AsGpuRegister(), 0);
    }
    EmitBranch(in_reg.AsGpuRegister(), ZERO, &null_arg, true);
    AddConstant64(out_reg.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
    Bind(&null_arg, false);
  } else {
    AddConstant64(out_reg.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
  }
}

void Mips64Assembler::CreateHandleScopeEntry(FrameOffset out_off,
                                             FrameOffset handle_scope_offset,
                                             ManagedRegister mscratch,
                                             bool null_allowed) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  if (null_allowed) {
    Label null_arg;
    LoadFromOffset(kLoadUnsignedWord, scratch.AsGpuRegister(), SP,
                   handle_scope_offset.Int32Value());
    // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
    // the address in the handle scope holding the reference.
    // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
    EmitBranch(scratch.AsGpuRegister(), ZERO, &null_arg, true);
    AddConstant64(scratch.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
    Bind(&null_arg, false);
  } else {
    AddConstant64(scratch.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
  }
  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, out_off.Int32Value());
}

// Given a handle scope entry, load the associated reference.
void Mips64Assembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
                                                   ManagedRegister min_reg) {
  Mips64ManagedRegister out_reg = mout_reg.AsMips64();
  Mips64ManagedRegister in_reg = min_reg.AsMips64();
  CHECK(out_reg.IsGpuRegister()) << out_reg;
  CHECK(in_reg.IsGpuRegister()) << in_reg;
  Label null_arg;
  if (!out_reg.Equals(in_reg)) {
    LoadImmediate64(out_reg.AsGpuRegister(), 0);
  }
  EmitBranch(in_reg.AsGpuRegister(), ZERO, &null_arg, true);
  LoadFromOffset(kLoadDoubleword, out_reg.AsGpuRegister(),
                 in_reg.AsGpuRegister(), 0);
  Bind(&null_arg, false);
}

void Mips64Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references
}

void Mips64Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references
}

void Mips64Assembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister mscratch) {
  Mips64ManagedRegister base = mbase.AsMips64();
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(base.IsGpuRegister()) << base;
  CHECK(scratch.IsGpuRegister()) << scratch;
  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
                 base.AsGpuRegister(), offset.Int32Value());
  Jalr(scratch.AsGpuRegister());
  // TODO: place reference map on call
}

void Mips64Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  // Call *(*(SP + base) + offset)
  LoadFromOffset(kLoadUnsignedWord, scratch.AsGpuRegister(),
                 SP, base.Int32Value());
  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
                 scratch.AsGpuRegister(), offset.Int32Value());
  Jalr(scratch.AsGpuRegister());
  // TODO: place reference map on call
}

void Mips64Assembler::CallFromThread64(ThreadOffset<8> /*offset*/, ManagedRegister /*mscratch*/) {
  UNIMPLEMENTED(FATAL) << "no mips64 implementation";
}

void Mips64Assembler::GetCurrentThread(ManagedRegister tr) {
  Move(tr.AsMips64().AsGpuRegister(), S1);
}

void Mips64Assembler::GetCurrentThread(FrameOffset offset,
                                       ManagedRegister /*mscratch*/) {
  StoreToOffset(kStoreDoubleword, S1, SP, offset.Int32Value());
}

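// ExceptionPoll emits the fast-path check: load Thread::Current()->exception_
// and branch to an out-of-line slow path (emitted at the end of the buffer)
// when it is non-null; the slow path below then tail-calls pDeliverException.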
void Mips64Assembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  Mips64ExceptionSlowPath* slow = new Mips64ExceptionSlowPath(scratch, stack_adjust);
  buffer_.EnqueueSlowPath(slow);
  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
                 S1, Thread::ExceptionOffset<8>().Int32Value());
  EmitBranch(scratch.AsGpuRegister(), ZERO, slow->Entry(), false);
}

void Mips64ExceptionSlowPath::Emit(Assembler* sasm) {
  Mips64Assembler* sp_asm = down_cast<Mips64Assembler*>(sasm);
#define __ sp_asm->
  __ Bind(&entry_, false);
  if (stack_adjust_ != 0) {  // Fix up the frame.
    __ DecreaseFrameSize(stack_adjust_);
  }
  // Pass exception object as argument.
  // Don't care about preserving A0 as this call won't return.
  __ Move(A0, scratch_.AsGpuRegister());
  // Set up call to Thread::Current()->pDeliverException.
  __ LoadFromOffset(kLoadDoubleword, T9, S1,
                    QUICK_ENTRYPOINT_OFFSET(8, pDeliverException).Int32Value());
  __ Jr(T9);
  // Call never returns.
  __ Break();
#undef __
}

}  // namespace mips64
}  // namespace art