/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "scheduler_arm64.h"
#include "code_generator_utils.h"
#include "mirror/array-inl.h"

namespace art {
namespace arm64 {

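// Note: the values assigned below are abstract latencies used to order
// instructions during scheduling, not exact cycle counts for any particular
// core. For each visited instruction, `last_visited_latency_` models the cost
// carried by the instruction's result, while `last_visited_internal_latency_`
// accounts for extra instructions the code generator emits before producing
// that result.
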
void SchedulingLatencyVisitorARM64::VisitBinaryOperation(HBinaryOperation* instr) {
  last_visited_latency_ = Primitive::IsFloatingPointType(instr->GetResultType())
      ? kArm64FloatingPointOpLatency
      : kArm64IntegerOpLatency;
}

void SchedulingLatencyVisitorARM64::VisitBitwiseNegatedRight(
    HBitwiseNegatedRight* ATTRIBUTE_UNUSED) {
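  // Lowered to a single data-processing instruction (bic/orn/eon).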
  last_visited_latency_ = kArm64IntegerOpLatency;
}

void SchedulingLatencyVisitorARM64::VisitDataProcWithShifterOp(
    HDataProcWithShifterOp* ATTRIBUTE_UNUSED) {
  last_visited_latency_ = kArm64DataProcWithShifterOpLatency;
}

void SchedulingLatencyVisitorARM64::VisitIntermediateAddress(
    HIntermediateAddress* ATTRIBUTE_UNUSED) {
  // Although the generated code is a simple `add` instruction, empirical results
  // showed that scheduling it away from its use in memory accesses is beneficial.
  last_visited_latency_ = kArm64IntegerOpLatency + 2;
}

void SchedulingLatencyVisitorARM64::VisitIntermediateAddressIndex(
    HIntermediateAddressIndex* instr ATTRIBUTE_UNUSED) {
  // The generated code is an `add` with a shifted index operand; as above,
  // empirical results showed that scheduling it away from its use in memory
  // accesses is beneficial.
  last_visited_latency_ = kArm64DataProcWithShifterOpLatency + 2;
}

void SchedulingLatencyVisitorARM64::VisitMultiplyAccumulate(HMultiplyAccumulate* ATTRIBUTE_UNUSED) {
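  // Lowered to a single madd/msub instruction.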
  last_visited_latency_ = kArm64MulIntegerLatency;
}

void SchedulingLatencyVisitorARM64::VisitArrayGet(HArrayGet* instruction) {
  if (!instruction->GetArray()->IsIntermediateAddress()) {
    // Take the intermediate address computation into account.
    last_visited_internal_latency_ = kArm64IntegerOpLatency;
  }
  last_visited_latency_ = kArm64MemoryLoadLatency;
}

void SchedulingLatencyVisitorARM64::VisitArrayLength(HArrayLength* ATTRIBUTE_UNUSED) {
  last_visited_latency_ = kArm64MemoryLoadLatency;
}

void SchedulingLatencyVisitorARM64::VisitArraySet(HArraySet* ATTRIBUTE_UNUSED) {
  last_visited_latency_ = kArm64MemoryStoreLatency;
}

void SchedulingLatencyVisitorARM64::VisitBoundsCheck(HBoundsCheck* ATTRIBUTE_UNUSED) {
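  // A bounds check lowers to a compare plus a conditional branch to the slow path.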
  last_visited_internal_latency_ = kArm64IntegerOpLatency;
  // Users do not use any data results.
  last_visited_latency_ = 0;
}

void SchedulingLatencyVisitorARM64::VisitDiv(HDiv* instr) {
  Primitive::Type type = instr->GetResultType();
  switch (type) {
    case Primitive::kPrimFloat:
      last_visited_latency_ = kArm64DivFloatLatency;
      break;
    case Primitive::kPrimDouble:
      last_visited_latency_ = kArm64DivDoubleLatency;
      break;
    default:
      // Follow the code path used by code generation.
      if (instr->GetRight()->IsConstant()) {
        int64_t imm = Int64FromConstant(instr->GetRight()->AsConstant());
        if (imm == 0) {
          last_visited_internal_latency_ = 0;
          last_visited_latency_ = 0;
        } else if (imm == 1 || imm == -1) {
          last_visited_internal_latency_ = 0;
          last_visited_latency_ = kArm64IntegerOpLatency;
        } else if (IsPowerOfTwo(AbsOrMin(imm))) {
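          // Division by a power of two is lowered to an add/cmp/csel/shift
          // sequence rather than an `sdiv`.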
          last_visited_internal_latency_ = 4 * kArm64IntegerOpLatency;
          last_visited_latency_ = kArm64IntegerOpLatency;
        } else {
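          // Any other non-zero constant divisor uses the magic-number
          // algorithm: a multiply-high followed by shift and add fix-ups.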
          DCHECK(imm <= -2 || imm >= 2);
          last_visited_internal_latency_ = 4 * kArm64IntegerOpLatency;
          last_visited_latency_ = kArm64MulIntegerLatency;
        }
      } else {
        last_visited_latency_ = kArm64DivIntegerLatency;
      }
      break;
  }
}

void SchedulingLatencyVisitorARM64::VisitInstanceFieldGet(HInstanceFieldGet* ATTRIBUTE_UNUSED) {
  last_visited_latency_ = kArm64MemoryLoadLatency;
}

void SchedulingLatencyVisitorARM64::VisitInstanceOf(HInstanceOf* ATTRIBUTE_UNUSED) {
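  // Type checks may call into the runtime, so this is modeled as a call plus
  // the final instruction materializing the boolean result.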
  last_visited_internal_latency_ = kArm64CallInternalLatency;
  last_visited_latency_ = kArm64IntegerOpLatency;
}

void SchedulingLatencyVisitorARM64::VisitInvoke(HInvoke* ATTRIBUTE_UNUSED) {
  last_visited_internal_latency_ = kArm64CallInternalLatency;
  last_visited_latency_ = kArm64CallLatency;
}

void SchedulingLatencyVisitorARM64::VisitLoadString(HLoadString* ATTRIBUTE_UNUSED) {
  last_visited_internal_latency_ = kArm64LoadStringInternalLatency;
  last_visited_latency_ = kArm64MemoryLoadLatency;
}

void SchedulingLatencyVisitorARM64::VisitMul(HMul* instr) {
  last_visited_latency_ = Primitive::IsFloatingPointType(instr->GetResultType())
      ? kArm64MulFloatingPointLatency
      : kArm64MulIntegerLatency;
}

void SchedulingLatencyVisitorARM64::VisitNewArray(HNewArray* ATTRIBUTE_UNUSED) {
  last_visited_internal_latency_ = kArm64IntegerOpLatency + kArm64CallInternalLatency;
  last_visited_latency_ = kArm64CallLatency;
}

void SchedulingLatencyVisitorARM64::VisitNewInstance(HNewInstance* instruction) {
  if (instruction->IsStringAlloc()) {
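    // String allocation goes through an entrypoint loaded from the current
    // thread, hence the extra memory load before the runtime call.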
    last_visited_internal_latency_ = 2 + kArm64MemoryLoadLatency + kArm64CallInternalLatency;
  } else {
    last_visited_internal_latency_ = kArm64CallInternalLatency;
  }
  last_visited_latency_ = kArm64CallLatency;
}

void SchedulingLatencyVisitorARM64::VisitRem(HRem* instruction) {
  if (Primitive::IsFloatingPointType(instruction->GetResultType())) {
    last_visited_internal_latency_ = kArm64CallInternalLatency;
    last_visited_latency_ = kArm64CallLatency;
  } else {
    // Follow the code path used by code generation.
    if (instruction->GetRight()->IsConstant()) {
      int64_t imm = Int64FromConstant(instruction->GetRight()->AsConstant());
      if (imm == 0) {
        last_visited_internal_latency_ = 0;
        last_visited_latency_ = 0;
      } else if (imm == 1 || imm == -1) {
        last_visited_internal_latency_ = 0;
        last_visited_latency_ = kArm64IntegerOpLatency;
      } else if (IsPowerOfTwo(AbsOrMin(imm))) {
        last_visited_internal_latency_ = 4 * kArm64IntegerOpLatency;
        last_visited_latency_ = kArm64IntegerOpLatency;
      } else {
        DCHECK(imm <= -2 || imm >= 2);
        last_visited_internal_latency_ = 4 * kArm64IntegerOpLatency;
        last_visited_latency_ = kArm64MulIntegerLatency;
      }
    } else {
      last_visited_internal_latency_ = kArm64DivIntegerLatency;
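      // The remainder is computed as `sdiv` followed by `msub`
      // (rem = dividend - quotient * divisor), hence the final multiply.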
      last_visited_latency_ = kArm64MulIntegerLatency;
    }
  }
}

void SchedulingLatencyVisitorARM64::VisitStaticFieldGet(HStaticFieldGet* ATTRIBUTE_UNUSED) {
  last_visited_latency_ = kArm64MemoryLoadLatency;
}

void SchedulingLatencyVisitorARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
  HBasicBlock* block = instruction->GetBlock();
  DCHECK((block->GetLoopInformation() != nullptr) ||
         (block->IsEntryBlock() && instruction->GetNext()->IsGoto()));
  // Users do not use any data results.
  last_visited_latency_ = 0;
}

void SchedulingLatencyVisitorARM64::VisitTypeConversion(HTypeConversion* instr) {
  if (Primitive::IsFloatingPointType(instr->GetResultType()) ||
      Primitive::IsFloatingPointType(instr->GetInputType())) {
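    // Conversions to or from floating point use fcvt/scvtf/fcvtzs-style
    // instructions, which are costlier than integer extensions and moves.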
    last_visited_latency_ = kArm64TypeConversionFloatingPointIntegerLatency;
  } else {
    last_visited_latency_ = kArm64IntegerOpLatency;
  }
}

void SchedulingLatencyVisitorARM64::HandleSimpleArithmeticSIMD(HVecOperation* instr) {
  if (Primitive::IsFloatingPointType(instr->GetPackedType())) {
    last_visited_latency_ = kArm64SIMDFloatingPointOpLatency;
  } else {
    last_visited_latency_ = kArm64SIMDIntegerOpLatency;
  }
}

void SchedulingLatencyVisitorARM64::VisitVecReplicateScalar(
    HVecReplicateScalar* instr ATTRIBUTE_UNUSED) {
  last_visited_latency_ = kArm64SIMDReplicateOpLatency;
}

void SchedulingLatencyVisitorARM64::VisitVecSetScalars(HVecSetScalars* instr) {
  LOG(FATAL) << "Unsupported SIMD instruction " << instr->GetId();
}

void SchedulingLatencyVisitorARM64::VisitVecSumReduce(HVecSumReduce* instr) {
  LOG(FATAL) << "Unsupported SIMD instruction " << instr->GetId();
}

void SchedulingLatencyVisitorARM64::VisitVecCnv(HVecCnv* instr ATTRIBUTE_UNUSED) {
  last_visited_latency_ = kArm64SIMDTypeConversionInt2FPLatency;
}

void SchedulingLatencyVisitorARM64::VisitVecNeg(HVecNeg* instr) {
  HandleSimpleArithmeticSIMD(instr);
}

void SchedulingLatencyVisitorARM64::VisitVecAbs(HVecAbs* instr) {
  HandleSimpleArithmeticSIMD(instr);
}

void SchedulingLatencyVisitorARM64::VisitVecNot(HVecNot* instr) {
  if (instr->GetPackedType() == Primitive::kPrimBoolean) {
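    // Booleans are negated as `x ^ 1`, which takes an extra instruction to
    // materialize the constant.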
    last_visited_internal_latency_ = kArm64SIMDIntegerOpLatency;
  }
  last_visited_latency_ = kArm64SIMDIntegerOpLatency;
}

void SchedulingLatencyVisitorARM64::VisitVecAdd(HVecAdd* instr) {
  HandleSimpleArithmeticSIMD(instr);
}

void SchedulingLatencyVisitorARM64::VisitVecHalvingAdd(HVecHalvingAdd* instr) {
  HandleSimpleArithmeticSIMD(instr);
}

void SchedulingLatencyVisitorARM64::VisitVecSub(HVecSub* instr) {
  HandleSimpleArithmeticSIMD(instr);
}

void SchedulingLatencyVisitorARM64::VisitVecMul(HVecMul* instr) {
  if (Primitive::IsFloatingPointType(instr->GetPackedType())) {
    last_visited_latency_ = kArm64SIMDMulFloatingPointLatency;
  } else {
    last_visited_latency_ = kArm64SIMDMulIntegerLatency;
  }
}

void SchedulingLatencyVisitorARM64::VisitVecDiv(HVecDiv* instr) {
  if (instr->GetPackedType() == Primitive::kPrimFloat) {
    last_visited_latency_ = kArm64SIMDDivFloatLatency;
  } else {
    DCHECK(instr->GetPackedType() == Primitive::kPrimDouble);
    last_visited_latency_ = kArm64SIMDDivDoubleLatency;
  }
}

void SchedulingLatencyVisitorARM64::VisitVecMin(HVecMin* instr) {
  HandleSimpleArithmeticSIMD(instr);
}

void SchedulingLatencyVisitorARM64::VisitVecMax(HVecMax* instr) {
  HandleSimpleArithmeticSIMD(instr);
}

void SchedulingLatencyVisitorARM64::VisitVecAnd(HVecAnd* instr ATTRIBUTE_UNUSED) {
  last_visited_latency_ = kArm64SIMDIntegerOpLatency;
}

void SchedulingLatencyVisitorARM64::VisitVecAndNot(HVecAndNot* instr) {
  LOG(FATAL) << "Unsupported SIMD instruction " << instr->GetId();
}

void SchedulingLatencyVisitorARM64::VisitVecOr(HVecOr* instr ATTRIBUTE_UNUSED) {
  last_visited_latency_ = kArm64SIMDIntegerOpLatency;
}

void SchedulingLatencyVisitorARM64::VisitVecXor(HVecXor* instr ATTRIBUTE_UNUSED) {
  last_visited_latency_ = kArm64SIMDIntegerOpLatency;
}

void SchedulingLatencyVisitorARM64::VisitVecShl(HVecShl* instr) {
  HandleSimpleArithmeticSIMD(instr);
}

void SchedulingLatencyVisitorARM64::VisitVecShr(HVecShr* instr) {
  HandleSimpleArithmeticSIMD(instr);
}

void SchedulingLatencyVisitorARM64::VisitVecUShr(HVecUShr* instr) {
  HandleSimpleArithmeticSIMD(instr);
}

void SchedulingLatencyVisitorARM64::VisitVecMultiplyAccumulate(
    HVecMultiplyAccumulate* instr ATTRIBUTE_UNUSED) {
  last_visited_latency_ = kArm64SIMDMulIntegerLatency;
}

void SchedulingLatencyVisitorARM64::HandleVecAddress(
    HVecMemoryOperation* instruction,
    size_t size ATTRIBUTE_UNUSED) {
  HInstruction* index = instruction->InputAt(1);
  if (!index->IsConstant()) {
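    // A non-constant index needs an extra address computation: an add with
    // the index register shifted by the element size.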
    last_visited_internal_latency_ += kArm64DataProcWithShifterOpLatency;
  }
}

void SchedulingLatencyVisitorARM64::VisitVecLoad(HVecLoad* instr) {
  last_visited_internal_latency_ = 0;
  size_t size = Primitive::ComponentSize(instr->GetPackedType());

  if (instr->GetPackedType() == Primitive::kPrimChar &&
      mirror::kUseStringCompression &&
      instr->IsStringCharAt()) {
    // Set latencies for the uncompressed case.
    last_visited_internal_latency_ += kArm64MemoryLoadLatency + kArm64BranchLatency;
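    // The extra load and branch above model reading the string's count field
    // and testing its compression flag before the vector load.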
    HandleVecAddress(instr, size);
    last_visited_latency_ = kArm64SIMDMemoryLoadLatency;
  } else {
    HandleVecAddress(instr, size);
    last_visited_latency_ = kArm64SIMDMemoryLoadLatency;
  }
}

void SchedulingLatencyVisitorARM64::VisitVecStore(HVecStore* instr) {
  last_visited_internal_latency_ = 0;
  size_t size = Primitive::ComponentSize(instr->GetPackedType());
  HandleVecAddress(instr, size);
  last_visited_latency_ = kArm64SIMDMemoryStoreLatency;
}

}  // namespace arm64
}  // namespace art