//===--------------------- InstrBuilder.cpp ---------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file implements the InstrBuilder interface.
///
//===----------------------------------------------------------------------===//
#include "llvm/MCA/InstrBuilder.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/WithColor.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "llvm-mca"

namespace llvm {
namespace mca {

Andrea Di Biagioaa7c2d82018-10-25 11:51:34 +000028InstrBuilder::InstrBuilder(const llvm::MCSubtargetInfo &sti,
29 const llvm::MCInstrInfo &mcii,
30 const llvm::MCRegisterInfo &mri,
Andrea Di Biagio4ed822d2018-12-17 14:00:37 +000031 const llvm::MCInstrAnalysis *mcia)
Andrea Di Biagio71e44212018-11-24 18:40:45 +000032 : STI(sti), MCII(mcii), MRI(mri), MCIA(mcia), FirstCallInst(true),
33 FirstReturnInst(true) {
Andrea Di Biagio437f3bd2019-01-10 13:59:13 +000034 const MCSchedModel &SM = STI.getSchedModel();
35 ProcResourceMasks.resize(SM.getNumProcResourceKinds());
Andrea Di Biagioaa7c2d82018-10-25 11:51:34 +000036 computeProcResourceMasks(STI.getSchedModel(), ProcResourceMasks);
37}
38
Andrea Di Biagioadd7b3e2018-03-24 16:05:36 +000039static void initializeUsedResources(InstrDesc &ID,
40 const MCSchedClassDesc &SCDesc,
41 const MCSubtargetInfo &STI,
42 ArrayRef<uint64_t> ProcResourceMasks) {
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +000043 const MCSchedModel &SM = STI.getSchedModel();
44
45 // Populate resources consumed.
46 using ResourcePlusCycles = std::pair<uint64_t, ResourceUsage>;
47 std::vector<ResourcePlusCycles> Worklist;
Andrea Di Biagiob29b4912018-06-04 12:23:07 +000048
49 // Track cycles contributed by resources that are in a "Super" relationship.
50 // This is required if we want to correctly match the behavior of method
51 // SubtargetEmitter::ExpandProcResource() in Tablegen. When computing the set
52 // of "consumed" processor resources and resource cycles, the logic in
53 // ExpandProcResource() doesn't update the number of resource cycles
54 // contributed by a "Super" resource to a group.
55 // We need to take this into account when we find that a processor resource is
56 // part of a group, and it is also used as the "Super" of other resources.
57 // This map stores the number of cycles contributed by sub-resources that are
58 // part of a "Super" resource. The key value is the "Super" resource mask ID.
59 DenseMap<uint64_t, unsigned> SuperResources;
60
Andrea Di Biagiod09d3d92018-11-09 19:30:20 +000061 unsigned NumProcResources = SM.getNumProcResourceKinds();
62 APInt Buffers(NumProcResources, 0);
63
Andrea Di Biagio7fccc802019-01-04 15:08:38 +000064 bool AllInOrderResources = true;
65 bool AnyDispatchHazards = false;
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +000066 for (unsigned I = 0, E = SCDesc.NumWriteProcResEntries; I < E; ++I) {
67 const MCWriteProcResEntry *PRE = STI.getWriteProcResBegin(&SCDesc) + I;
68 const MCProcResourceDesc &PR = *SM.getProcResource(PRE->ProcResourceIdx);
69 uint64_t Mask = ProcResourceMasks[PRE->ProcResourceIdx];
Andrea Di Biagio7fccc802019-01-04 15:08:38 +000070 if (PR.BufferSize < 0) {
71 AllInOrderResources = false;
72 } else {
Andrea Di Biagiod09d3d92018-11-09 19:30:20 +000073 Buffers.setBit(PRE->ProcResourceIdx);
Andrea Di Biagio7fccc802019-01-04 15:08:38 +000074 AnyDispatchHazards |= (PR.BufferSize == 0);
75 AllInOrderResources &= (PR.BufferSize <= 1);
76 }
77
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +000078 CycleSegment RCy(0, PRE->Cycles, false);
79 Worklist.emplace_back(ResourcePlusCycles(Mask, ResourceUsage(RCy)));
Andrea Di Biagiob29b4912018-06-04 12:23:07 +000080 if (PR.SuperIdx) {
81 uint64_t Super = ProcResourceMasks[PR.SuperIdx];
82 SuperResources[Super] += PRE->Cycles;
83 }
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +000084 }
85
Andrea Di Biagio7fccc802019-01-04 15:08:38 +000086 ID.MustIssueImmediately = AllInOrderResources && AnyDispatchHazards;
87
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +000088 // Sort elements by mask popcount, so that we prioritize resource units over
89 // resource groups, and smaller groups over larger groups.
Andrea Di Biagio965ed082018-09-28 10:47:24 +000090 sort(Worklist, [](const ResourcePlusCycles &A, const ResourcePlusCycles &B) {
91 unsigned popcntA = countPopulation(A.first);
92 unsigned popcntB = countPopulation(B.first);
93 if (popcntA < popcntB)
94 return true;
95 if (popcntA > popcntB)
96 return false;
97 return A.first < B.first;
98 });
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +000099
100 uint64_t UsedResourceUnits = 0;
101
102 // Remove cycles contributed by smaller resources.
103 for (unsigned I = 0, E = Worklist.size(); I < E; ++I) {
104 ResourcePlusCycles &A = Worklist[I];
105 if (!A.second.size()) {
106 A.second.NumUnits = 0;
107 A.second.setReserved();
108 ID.Resources.emplace_back(A);
109 continue;
110 }
111
112 ID.Resources.emplace_back(A);
113 uint64_t NormalizedMask = A.first;
114 if (countPopulation(A.first) == 1) {
115 UsedResourceUnits |= A.first;
116 } else {
117 // Remove the leading 1 from the resource group mask.
118 NormalizedMask ^= PowerOf2Floor(NormalizedMask);
119 }
120
121 for (unsigned J = I + 1; J < E; ++J) {
122 ResourcePlusCycles &B = Worklist[J];
123 if ((NormalizedMask & B.first) == NormalizedMask) {
Matt Davis9aca4e82018-10-01 23:01:45 +0000124 B.second.CS.subtract(A.second.size() - SuperResources[A.first]);
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000125 if (countPopulation(B.first) > 1)
126 B.second.NumUnits++;
127 }
128 }
129 }
130
131 // A SchedWrite may specify a number of cycles in which a resource group
132 // is reserved. For example (on target x86; cpu Haswell):
133 //
134 // SchedWriteRes<[HWPort0, HWPort1, HWPort01]> {
135 // let ResourceCycles = [2, 2, 3];
136 // }
137 //
138 // This means:
139 // Resource units HWPort0 and HWPort1 are both used for 2cy.
140 // Resource group HWPort01 is the union of HWPort0 and HWPort1.
141 // Since this write touches both HWPort0 and HWPort1 for 2cy, HWPort01
142 // will not be usable for 2 entire cycles from instruction issue.
143 //
144 // On top of those 2cy, SchedWriteRes explicitly specifies an extra latency
145 // of 3 cycles for HWPort01. This tool assumes that the 3cy latency is an
146 // extra delay on top of the 2 cycles latency.
147 // During those extra cycles, HWPort01 is not usable by other instructions.
148 for (ResourcePlusCycles &RPC : ID.Resources) {
149 if (countPopulation(RPC.first) > 1 && !RPC.second.isReserved()) {
150 // Remove the leading 1 from the resource group mask.
151 uint64_t Mask = RPC.first ^ PowerOf2Floor(RPC.first);
152 if ((Mask & UsedResourceUnits) == Mask)
153 RPC.second.setReserved();
154 }
155 }
156
Andrea Di Biagiod09d3d92018-11-09 19:30:20 +0000157 // Identify extra buffers that are consumed through super resources.
158 for (const std::pair<uint64_t, unsigned> &SR : SuperResources) {
159 for (unsigned I = 1, E = NumProcResources; I < E; ++I) {
160 const MCProcResourceDesc &PR = *SM.getProcResource(I);
161 if (PR.BufferSize == -1)
162 continue;
163
164 uint64_t Mask = ProcResourceMasks[I];
165 if (Mask != SR.first && ((Mask & SR.first) == SR.first))
166 Buffers.setBit(I);
167 }
168 }
169
170 // Now set the buffers.
171 if (unsigned NumBuffers = Buffers.countPopulation()) {
172 ID.Buffers.resize(NumBuffers);
173 for (unsigned I = 0, E = NumProcResources; I < E && NumBuffers; ++I) {
174 if (Buffers[I]) {
175 --NumBuffers;
176 ID.Buffers[NumBuffers] = ProcResourceMasks[I];
177 }
178 }
179 }
180
Nicola Zaghen0818e782018-05-14 12:53:11 +0000181 LLVM_DEBUG({
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000182 for (const std::pair<uint64_t, ResourceUsage> &R : ID.Resources)
Andrea Di Biagio437f3bd2019-01-10 13:59:13 +0000183 dbgs() << "\t\tMask=" << format_hex(R.first, 16) << ", "
184 << "cy=" << R.second.size() << '\n';
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000185 for (const uint64_t R : ID.Buffers)
Evandro Menezesbb50ac52019-01-09 23:57:15 +0000186 dbgs() << "\t\tBuffer Mask=" << format_hex(R, 16) << '\n';
Andrea Di Biagio0d055e12018-03-20 12:58:34 +0000187 });
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000188}
189
190static void computeMaxLatency(InstrDesc &ID, const MCInstrDesc &MCDesc,
191 const MCSchedClassDesc &SCDesc,
192 const MCSubtargetInfo &STI) {
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000193 if (MCDesc.isCall()) {
194 // We cannot estimate how long this call will take.
195 // Artificially set an arbitrarily high latency (100cy).
Andrea Di Biagio604cc1c2018-03-13 15:59:59 +0000196 ID.MaxLatency = 100U;
197 return;
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000198 }
199
Andrea Di Biagio604cc1c2018-03-13 15:59:59 +0000200 int Latency = MCSchedModel::computeInstrLatency(STI, SCDesc);
201 // If latency is unknown, then conservatively assume a MaxLatency of 100cy.
202 ID.MaxLatency = Latency < 0 ? 100U : static_cast<unsigned>(Latency);
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000203}
204
Andrea Di Biagio464205d2018-11-23 20:26:57 +0000205static Error verifyOperands(const MCInstrDesc &MCDesc, const MCInst &MCI) {
Andrea Di Biagio464205d2018-11-23 20:26:57 +0000206 // Count register definitions, and skip non register operands in the process.
207 unsigned I, E;
208 unsigned NumExplicitDefs = MCDesc.getNumDefs();
209 for (I = 0, E = MCI.getNumOperands(); NumExplicitDefs && I < E; ++I) {
210 const MCOperand &Op = MCI.getOperand(I);
211 if (Op.isReg())
212 --NumExplicitDefs;
213 }
214
215 if (NumExplicitDefs) {
216 return make_error<InstructionError<MCInst>>(
217 "Expected more register operand definitions.", MCI);
218 }
219
220 if (MCDesc.hasOptionalDef()) {
221 // Always assume that the optional definition is the last operand.
222 const MCOperand &Op = MCI.getOperand(MCDesc.getNumOperands() - 1);
223 if (I == MCI.getNumOperands() || !Op.isReg()) {
224 std::string Message =
225 "expected a register operand for an optional definition. Instruction "
226 "has not been correctly analyzed.";
227 return make_error<InstructionError<MCInst>>(Message, MCI);
228 }
229 }
230
231 return ErrorSuccess();
232}
233
234void InstrBuilder::populateWrites(InstrDesc &ID, const MCInst &MCI,
235 unsigned SchedClassID) {
Andrea Di Biagiod77ac0d2018-07-09 12:30:55 +0000236 const MCInstrDesc &MCDesc = MCII.get(MCI.getOpcode());
237 const MCSchedModel &SM = STI.getSchedModel();
238 const MCSchedClassDesc &SCDesc = *SM.getSchedClassDesc(SchedClassID);
239
Andrea Di Biagio464205d2018-11-23 20:26:57 +0000240 // Assumptions made by this algorithm:
241 // 1. The number of explicit and implicit register definitions in a MCInst
242 // matches the number of explicit and implicit definitions according to
243 // the opcode descriptor (MCInstrDesc).
244 // 2. Uses start at index #(MCDesc.getNumDefs()).
245 // 3. There can only be a single optional register definition, an it is
246 // always the last operand of the sequence (excluding extra operands
247 // contributed by variadic opcodes).
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000248 //
249 // These assumptions work quite well for most out-of-order in-tree targets
250 // like x86. This is mainly because the vast majority of instructions is
251 // expanded to MCInst using a straightforward lowering logic that preserves
252 // the ordering of the operands.
Andrea Di Biagio464205d2018-11-23 20:26:57 +0000253 //
254 // About assumption 1.
255 // The algorithm allows non-register operands between register operand
256 // definitions. This helps to handle some special ARM instructions with
257 // implicit operand increment (-mtriple=armv7):
258 //
259 // vld1.32 {d18, d19}, [r1]! @ <MCInst #1463 VLD1q32wb_fixed
260 // @ <MCOperand Reg:59>
261 // @ <MCOperand Imm:0> (!!)
262 // @ <MCOperand Reg:67>
263 // @ <MCOperand Imm:0>
264 // @ <MCOperand Imm:14>
265 // @ <MCOperand Reg:0>>
266 //
267 // MCDesc reports:
268 // 6 explicit operands.
269 // 1 optional definition
270 // 2 explicit definitions (!!)
271 //
272 // The presence of an 'Imm' operand between the two register definitions
273 // breaks the assumption that "register definitions are always at the
274 // beginning of the operand sequence".
275 //
276 // To workaround this issue, this algorithm ignores (i.e. skips) any
277 // non-register operands between register definitions. The optional
278 // definition is still at index #(NumOperands-1).
279 //
280 // According to assumption 2. register reads start at #(NumExplicitDefs-1).
281 // That means, register R1 from the example is both read and written.
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000282 unsigned NumExplicitDefs = MCDesc.getNumDefs();
283 unsigned NumImplicitDefs = MCDesc.getNumImplicitDefs();
284 unsigned NumWriteLatencyEntries = SCDesc.NumWriteLatencyEntries;
285 unsigned TotalDefs = NumExplicitDefs + NumImplicitDefs;
286 if (MCDesc.hasOptionalDef())
287 TotalDefs++;
Andrea Di Biagio464205d2018-11-23 20:26:57 +0000288
Andrea Di Biagio8b53eb12018-11-25 12:46:24 +0000289 unsigned NumVariadicOps = MCI.getNumOperands() - MCDesc.getNumOperands();
290 ID.Writes.resize(TotalDefs + NumVariadicOps);
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000291 // Iterate over the operands list, and skip non-register operands.
292 // The first NumExplictDefs register operands are expected to be register
293 // definitions.
294 unsigned CurrentDef = 0;
295 unsigned i = 0;
296 for (; i < MCI.getNumOperands() && CurrentDef < NumExplicitDefs; ++i) {
297 const MCOperand &Op = MCI.getOperand(i);
298 if (!Op.isReg())
299 continue;
300
301 WriteDescriptor &Write = ID.Writes[CurrentDef];
302 Write.OpIndex = i;
303 if (CurrentDef < NumWriteLatencyEntries) {
304 const MCWriteLatencyEntry &WLE =
305 *STI.getWriteLatencyEntry(&SCDesc, CurrentDef);
306 // Conservatively default to MaxLatency.
Andrea Di Biagiod77ac0d2018-07-09 12:30:55 +0000307 Write.Latency =
308 WLE.Cycles < 0 ? ID.MaxLatency : static_cast<unsigned>(WLE.Cycles);
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000309 Write.SClassOrWriteResourceID = WLE.WriteResourceID;
310 } else {
311 // Assign a default latency for this write.
312 Write.Latency = ID.MaxLatency;
313 Write.SClassOrWriteResourceID = 0;
314 }
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000315 Write.IsOptionalDef = false;
Nicola Zaghen0818e782018-05-14 12:53:11 +0000316 LLVM_DEBUG({
Andrea Di Biagio464205d2018-11-23 20:26:57 +0000317 dbgs() << "\t\t[Def] OpIdx=" << Write.OpIndex
Andrea Di Biagio6ff281c2018-07-13 14:55:47 +0000318 << ", Latency=" << Write.Latency
Andrea Di Biagio0d055e12018-03-20 12:58:34 +0000319 << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
320 });
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000321 CurrentDef++;
322 }
323
Andrea Di Biagio464205d2018-11-23 20:26:57 +0000324 assert(CurrentDef == NumExplicitDefs &&
325 "Expected more register operand definitions.");
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000326 for (CurrentDef = 0; CurrentDef < NumImplicitDefs; ++CurrentDef) {
327 unsigned Index = NumExplicitDefs + CurrentDef;
328 WriteDescriptor &Write = ID.Writes[Index];
Andrea Di Biagioe0f992e2018-06-22 16:37:05 +0000329 Write.OpIndex = ~CurrentDef;
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000330 Write.RegisterID = MCDesc.getImplicitDefs()[CurrentDef];
Andrea Di Biagioe509ab02018-04-02 13:46:49 +0000331 if (Index < NumWriteLatencyEntries) {
332 const MCWriteLatencyEntry &WLE =
333 *STI.getWriteLatencyEntry(&SCDesc, Index);
334 // Conservatively default to MaxLatency.
Andrea Di Biagiod77ac0d2018-07-09 12:30:55 +0000335 Write.Latency =
336 WLE.Cycles < 0 ? ID.MaxLatency : static_cast<unsigned>(WLE.Cycles);
Andrea Di Biagioe509ab02018-04-02 13:46:49 +0000337 Write.SClassOrWriteResourceID = WLE.WriteResourceID;
338 } else {
339 // Assign a default latency for this write.
340 Write.Latency = ID.MaxLatency;
341 Write.SClassOrWriteResourceID = 0;
342 }
343
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000344 Write.IsOptionalDef = false;
345 assert(Write.RegisterID != 0 && "Expected a valid phys register!");
Andrea Di Biagio6ff281c2018-07-13 14:55:47 +0000346 LLVM_DEBUG({
Andrea Di Biagio464205d2018-11-23 20:26:57 +0000347 dbgs() << "\t\t[Def][I] OpIdx=" << ~Write.OpIndex
Andrea Di Biagio6ff281c2018-07-13 14:55:47 +0000348 << ", PhysReg=" << MRI.getName(Write.RegisterID)
349 << ", Latency=" << Write.Latency
350 << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
351 });
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000352 }
353
354 if (MCDesc.hasOptionalDef()) {
Andrea Di Biagio464205d2018-11-23 20:26:57 +0000355 WriteDescriptor &Write = ID.Writes[NumExplicitDefs + NumImplicitDefs];
356 Write.OpIndex = MCDesc.getNumOperands() - 1;
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000357 // Assign a default latency for this write.
358 Write.Latency = ID.MaxLatency;
359 Write.SClassOrWriteResourceID = 0;
360 Write.IsOptionalDef = true;
Andrea Di Biagio464205d2018-11-23 20:26:57 +0000361 LLVM_DEBUG({
362 dbgs() << "\t\t[Def][O] OpIdx=" << Write.OpIndex
363 << ", Latency=" << Write.Latency
364 << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
365 });
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000366 }
Andrea Di Biagio8b53eb12018-11-25 12:46:24 +0000367
368 if (!NumVariadicOps)
369 return;
370
371 // FIXME: if an instruction opcode is flagged 'mayStore', and it has no
372 // "unmodeledSideEffects', then this logic optimistically assumes that any
373 // extra register operands in the variadic sequence is not a register
374 // definition.
375 //
376 // Otherwise, we conservatively assume that any register operand from the
377 // variadic sequence is both a register read and a register write.
378 bool AssumeUsesOnly = MCDesc.mayStore() && !MCDesc.mayLoad() &&
379 !MCDesc.hasUnmodeledSideEffects();
380 CurrentDef = NumExplicitDefs + NumImplicitDefs + MCDesc.hasOptionalDef();
381 for (unsigned I = 0, OpIndex = MCDesc.getNumOperands();
382 I < NumVariadicOps && !AssumeUsesOnly; ++I, ++OpIndex) {
383 const MCOperand &Op = MCI.getOperand(OpIndex);
384 if (!Op.isReg())
385 continue;
386
387 WriteDescriptor &Write = ID.Writes[CurrentDef];
388 Write.OpIndex = OpIndex;
389 // Assign a default latency for this write.
390 Write.Latency = ID.MaxLatency;
391 Write.SClassOrWriteResourceID = 0;
392 Write.IsOptionalDef = false;
393 ++CurrentDef;
394 LLVM_DEBUG({
395 dbgs() << "\t\t[Def][V] OpIdx=" << Write.OpIndex
396 << ", Latency=" << Write.Latency
397 << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
398 });
399 }
400
401 ID.Writes.resize(CurrentDef);
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000402}
403
Andrea Di Biagio464205d2018-11-23 20:26:57 +0000404void InstrBuilder::populateReads(InstrDesc &ID, const MCInst &MCI,
405 unsigned SchedClassID) {
Andrea Di Biagiod77ac0d2018-07-09 12:30:55 +0000406 const MCInstrDesc &MCDesc = MCII.get(MCI.getOpcode());
Andrea Di Biagio464205d2018-11-23 20:26:57 +0000407 unsigned NumExplicitUses = MCDesc.getNumOperands() - MCDesc.getNumDefs();
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000408 unsigned NumImplicitUses = MCDesc.getNumImplicitUses();
Andrea Di Biagio464205d2018-11-23 20:26:57 +0000409 // Remove the optional definition.
410 if (MCDesc.hasOptionalDef())
411 --NumExplicitUses;
Andrea Di Biagio8b53eb12018-11-25 12:46:24 +0000412 unsigned NumVariadicOps = MCI.getNumOperands() - MCDesc.getNumOperands();
413 unsigned TotalUses = NumExplicitUses + NumImplicitUses + NumVariadicOps;
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000414 ID.Reads.resize(TotalUses);
Andrea Di Biagio8b53eb12018-11-25 12:46:24 +0000415 unsigned CurrentUse = 0;
416 for (unsigned I = 0, OpIndex = MCDesc.getNumDefs(); I < NumExplicitUses;
417 ++I, ++OpIndex) {
418 const MCOperand &Op = MCI.getOperand(OpIndex);
419 if (!Op.isReg())
420 continue;
421
422 ReadDescriptor &Read = ID.Reads[CurrentUse];
423 Read.OpIndex = OpIndex;
Andrea Di Biagio464205d2018-11-23 20:26:57 +0000424 Read.UseIndex = I;
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000425 Read.SchedClassID = SchedClassID;
Andrea Di Biagio8b53eb12018-11-25 12:46:24 +0000426 ++CurrentUse;
Andrea Di Biagio464205d2018-11-23 20:26:57 +0000427 LLVM_DEBUG(dbgs() << "\t\t[Use] OpIdx=" << Read.OpIndex
Andrea Di Biagio6ff281c2018-07-13 14:55:47 +0000428 << ", UseIndex=" << Read.UseIndex << '\n');
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000429 }
430
Andrea Di Biagio464205d2018-11-23 20:26:57 +0000431 // For the purpose of ReadAdvance, implicit uses come directly after explicit
432 // uses. The "UseIndex" must be updated according to that implicit layout.
433 for (unsigned I = 0; I < NumImplicitUses; ++I) {
Andrea Di Biagio8b53eb12018-11-25 12:46:24 +0000434 ReadDescriptor &Read = ID.Reads[CurrentUse + I];
Andrea Di Biagio464205d2018-11-23 20:26:57 +0000435 Read.OpIndex = ~I;
436 Read.UseIndex = NumExplicitUses + I;
437 Read.RegisterID = MCDesc.getImplicitUses()[I];
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000438 Read.SchedClassID = SchedClassID;
Andrea Di Biagio464205d2018-11-23 20:26:57 +0000439 LLVM_DEBUG(dbgs() << "\t\t[Use][I] OpIdx=" << ~Read.OpIndex
440 << ", UseIndex=" << Read.UseIndex << ", RegisterID="
Andrea Di Biagio6ff281c2018-07-13 14:55:47 +0000441 << MRI.getName(Read.RegisterID) << '\n');
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000442 }
Andrea Di Biagio8b53eb12018-11-25 12:46:24 +0000443
444 CurrentUse += NumImplicitUses;
445
446 // FIXME: If an instruction opcode is marked as 'mayLoad', and it has no
447 // "unmodeledSideEffects", then this logic optimistically assumes that any
448 // extra register operands in the variadic sequence are not register
449 // definition.
450
451 bool AssumeDefsOnly = !MCDesc.mayStore() && MCDesc.mayLoad() &&
452 !MCDesc.hasUnmodeledSideEffects();
453 for (unsigned I = 0, OpIndex = MCDesc.getNumOperands();
454 I < NumVariadicOps && !AssumeDefsOnly; ++I, ++OpIndex) {
455 const MCOperand &Op = MCI.getOperand(OpIndex);
456 if (!Op.isReg())
457 continue;
458
459 ReadDescriptor &Read = ID.Reads[CurrentUse];
460 Read.OpIndex = OpIndex;
461 Read.UseIndex = NumExplicitUses + NumImplicitUses + I;
462 Read.SchedClassID = SchedClassID;
463 ++CurrentUse;
464 LLVM_DEBUG(dbgs() << "\t\t[Use][V] OpIdx=" << Read.OpIndex
465 << ", UseIndex=" << Read.UseIndex << '\n');
466 }
467
468 ID.Reads.resize(CurrentUse);
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000469}
470
Andrea Di Biagio11510932018-10-04 10:36:49 +0000471Error InstrBuilder::verifyInstrDesc(const InstrDesc &ID,
472 const MCInst &MCI) const {
473 if (ID.NumMicroOps != 0)
474 return ErrorSuccess();
475
476 bool UsesMemory = ID.MayLoad || ID.MayStore;
477 bool UsesBuffers = !ID.Buffers.empty();
478 bool UsesResources = !ID.Resources.empty();
479 if (!UsesMemory && !UsesBuffers && !UsesResources)
480 return ErrorSuccess();
481
Andrea Di Biagio8f55d092018-10-24 10:56:47 +0000482 StringRef Message;
Andrea Di Biagio11510932018-10-04 10:36:49 +0000483 if (UsesMemory) {
Andrea Di Biagio8f55d092018-10-24 10:56:47 +0000484 Message = "found an inconsistent instruction that decodes "
485 "into zero opcodes and that consumes load/store "
486 "unit resources.";
Andrea Di Biagio11510932018-10-04 10:36:49 +0000487 } else {
Andrea Di Biagio8f55d092018-10-24 10:56:47 +0000488 Message = "found an inconsistent instruction that decodes "
489 "to zero opcodes and that consumes scheduler "
490 "resources.";
Andrea Di Biagio11510932018-10-04 10:36:49 +0000491 }
492
Andrea Di Biagio8f55d092018-10-24 10:56:47 +0000493 return make_error<InstructionError<MCInst>>(Message, MCI);
Andrea Di Biagio11510932018-10-04 10:36:49 +0000494}
495
Matt Davisfd195ce2018-08-13 18:11:48 +0000496Expected<const InstrDesc &>
497InstrBuilder::createInstrDescImpl(const MCInst &MCI) {
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000498 assert(STI.getSchedModel().hasInstrSchedModel() &&
499 "Itineraries are not yet supported!");
500
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000501 // Obtain the instruction descriptor from the opcode.
Andrea Di Biagiod77ac0d2018-07-09 12:30:55 +0000502 unsigned short Opcode = MCI.getOpcode();
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000503 const MCInstrDesc &MCDesc = MCII.get(Opcode);
504 const MCSchedModel &SM = STI.getSchedModel();
505
506 // Then obtain the scheduling class information from the instruction.
Andrea Di Biagiof5015302018-05-04 13:10:10 +0000507 unsigned SchedClassID = MCDesc.getSchedClass();
Andrea Di Biagio8b53eb12018-11-25 12:46:24 +0000508 bool IsVariant = SM.getSchedClassDesc(SchedClassID)->isVariant();
Andrea Di Biagio47acfad2018-06-04 15:43:09 +0000509
510 // Try to solve variant scheduling classes.
Andrea Di Biagio8b53eb12018-11-25 12:46:24 +0000511 if (IsVariant) {
512 unsigned CPUID = SM.getProcessorID();
Andrea Di Biagio47acfad2018-06-04 15:43:09 +0000513 while (SchedClassID && SM.getSchedClassDesc(SchedClassID)->isVariant())
514 SchedClassID = STI.resolveVariantSchedClass(SchedClassID, &MCI, CPUID);
515
Matt Davisfd195ce2018-08-13 18:11:48 +0000516 if (!SchedClassID) {
Andrea Di Biagio8f55d092018-10-24 10:56:47 +0000517 return make_error<InstructionError<MCInst>>(
518 "unable to resolve scheduling class for write variant.", MCI);
Matt Davisfd195ce2018-08-13 18:11:48 +0000519 }
Andrea Di Biagio47acfad2018-06-04 15:43:09 +0000520 }
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000521
Matt Davisfd195ce2018-08-13 18:11:48 +0000522 // Check if this instruction is supported. Otherwise, report an error.
Andrea Di Biagiod77ac0d2018-07-09 12:30:55 +0000523 const MCSchedClassDesc &SCDesc = *SM.getSchedClassDesc(SchedClassID);
524 if (SCDesc.NumMicroOps == MCSchedClassDesc::InvalidNumMicroOps) {
Andrea Di Biagio8f55d092018-10-24 10:56:47 +0000525 return make_error<InstructionError<MCInst>>(
526 "found an unsupported instruction in the input assembly sequence.",
527 MCI);
Andrea Di Biagiod77ac0d2018-07-09 12:30:55 +0000528 }
529
Andrea Di Biagio437f3bd2019-01-10 13:59:13 +0000530 LLVM_DEBUG(dbgs() << "\n\t\tOpcode Name= " << MCII.getName(Opcode) << '\n');
531 LLVM_DEBUG(dbgs() << "\t\tSchedClassID=" << SchedClassID << '\n');
532
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000533 // Create a new empty descriptor.
Andrea Di Biagio0d055e12018-03-20 12:58:34 +0000534 std::unique_ptr<InstrDesc> ID = llvm::make_unique<InstrDesc>();
Andrea Di Biagio47acfad2018-06-04 15:43:09 +0000535 ID->NumMicroOps = SCDesc.NumMicroOps;
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000536
Andrea Di Biagio71e44212018-11-24 18:40:45 +0000537 if (MCDesc.isCall() && FirstCallInst) {
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000538 // We don't correctly model calls.
Andrea Di Biagiof6d466f2018-05-04 13:52:12 +0000539 WithColor::warning() << "found a call in the input assembly sequence.\n";
540 WithColor::note() << "call instructions are not correctly modeled. "
541 << "Assume a latency of 100cy.\n";
Andrea Di Biagio71e44212018-11-24 18:40:45 +0000542 FirstCallInst = false;
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000543 }
544
Andrea Di Biagio71e44212018-11-24 18:40:45 +0000545 if (MCDesc.isReturn() && FirstReturnInst) {
Andrea Di Biagiof6d466f2018-05-04 13:52:12 +0000546 WithColor::warning() << "found a return instruction in the input"
547 << " assembly sequence.\n";
548 WithColor::note() << "program counter updates are ignored.\n";
Andrea Di Biagio71e44212018-11-24 18:40:45 +0000549 FirstReturnInst = false;
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000550 }
551
552 ID->MayLoad = MCDesc.mayLoad();
553 ID->MayStore = MCDesc.mayStore();
554 ID->HasSideEffects = MCDesc.hasUnmodeledSideEffects();
Andrea Di Biagio765a7622018-12-17 14:27:33 +0000555 ID->BeginGroup = SCDesc.BeginGroup;
556 ID->EndGroup = SCDesc.EndGroup;
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000557
558 initializeUsedResources(*ID, SCDesc, STI, ProcResourceMasks);
Andrea Di Biagio81554792018-04-25 09:38:58 +0000559 computeMaxLatency(*ID, MCDesc, SCDesc, STI);
Andrea Di Biagio464205d2018-11-23 20:26:57 +0000560
561 if (Error Err = verifyOperands(MCDesc, MCI))
Matt Davisfd195ce2018-08-13 18:11:48 +0000562 return std::move(Err);
Andrea Di Biagio464205d2018-11-23 20:26:57 +0000563
564 populateWrites(*ID, MCI, SchedClassID);
565 populateReads(*ID, MCI, SchedClassID);
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000566
Nicola Zaghen0818e782018-05-14 12:53:11 +0000567 LLVM_DEBUG(dbgs() << "\t\tMaxLatency=" << ID->MaxLatency << '\n');
568 LLVM_DEBUG(dbgs() << "\t\tNumMicroOps=" << ID->NumMicroOps << '\n');
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000569
Andrea Di Biagio11510932018-10-04 10:36:49 +0000570 // Sanity check on the instruction descriptor.
571 if (Error Err = verifyInstrDesc(*ID, MCI))
572 return std::move(Err);
573
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000574 // Now add the new descriptor.
Andrea Di Biagio47acfad2018-06-04 15:43:09 +0000575 SchedClassID = MCDesc.getSchedClass();
Andrea Di Biagio8b53eb12018-11-25 12:46:24 +0000576 bool IsVariadic = MCDesc.isVariadic();
577 if (!IsVariadic && !IsVariant) {
Andrea Di Biagio47acfad2018-06-04 15:43:09 +0000578 Descriptors[MCI.getOpcode()] = std::move(ID);
579 return *Descriptors[MCI.getOpcode()];
580 }
581
582 VariantDescriptors[&MCI] = std::move(ID);
583 return *VariantDescriptors[&MCI];
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000584}
585
Matt Davisfd195ce2018-08-13 18:11:48 +0000586Expected<const InstrDesc &>
587InstrBuilder::getOrCreateInstrDesc(const MCInst &MCI) {
Andrea Di Biagio47acfad2018-06-04 15:43:09 +0000588 if (Descriptors.find_as(MCI.getOpcode()) != Descriptors.end())
589 return *Descriptors[MCI.getOpcode()];
590
591 if (VariantDescriptors.find(&MCI) != VariantDescriptors.end())
592 return *VariantDescriptors[&MCI];
593
594 return createInstrDescImpl(MCI);
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000595}
596
Matt Davisfd195ce2018-08-13 18:11:48 +0000597Expected<std::unique_ptr<Instruction>>
Andrea Di Biagiof5015302018-05-04 13:10:10 +0000598InstrBuilder::createInstruction(const MCInst &MCI) {
Matt Davisfd195ce2018-08-13 18:11:48 +0000599 Expected<const InstrDesc &> DescOrErr = getOrCreateInstrDesc(MCI);
600 if (!DescOrErr)
601 return DescOrErr.takeError();
602 const InstrDesc &D = *DescOrErr;
Andrea Di Biagio0d055e12018-03-20 12:58:34 +0000603 std::unique_ptr<Instruction> NewIS = llvm::make_unique<Instruction>(D);
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000604
Andrea Di Biagioc66f16c2018-09-18 15:00:06 +0000605 // Check if this is a dependency breaking instruction.
Andrea Di Biagioa9c15c12018-09-19 15:57:45 +0000606 APInt Mask;
607
Andrea Di Biagio4ed822d2018-12-17 14:00:37 +0000608 bool IsZeroIdiom = false;
609 bool IsDepBreaking = false;
610 if (MCIA) {
611 unsigned ProcID = STI.getSchedModel().getProcessorID();
612 IsZeroIdiom = MCIA->isZeroIdiom(MCI, Mask, ProcID);
613 IsDepBreaking =
614 IsZeroIdiom || MCIA->isDependencyBreaking(MCI, Mask, ProcID);
615 if (MCIA->isOptimizableRegisterMove(MCI, ProcID))
616 NewIS->setOptimizableMove();
617 }
Andrea Di Biagioc66f16c2018-09-18 15:00:06 +0000618
Andrea Di Biagio81554792018-04-25 09:38:58 +0000619 // Initialize Reads first.
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000620 for (const ReadDescriptor &RD : D.Reads) {
621 int RegID = -1;
Andrea Di Biagioe0f992e2018-06-22 16:37:05 +0000622 if (!RD.isImplicitRead()) {
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000623 // explicit read.
624 const MCOperand &Op = MCI.getOperand(RD.OpIndex);
625 // Skip non-register operands.
626 if (!Op.isReg())
627 continue;
628 RegID = Op.getReg();
629 } else {
630 // Implicit read.
631 RegID = RD.RegisterID;
632 }
633
634 // Skip invalid register operands.
635 if (!RegID)
636 continue;
637
638 // Okay, this is a register operand. Create a ReadState for it.
639 assert(RegID > 0 && "Invalid register ID found!");
Andrea Di Biagioa64e4ae2018-10-25 17:03:51 +0000640 NewIS->getUses().emplace_back(RD, RegID);
641 ReadState &RS = NewIS->getUses().back();
Andrea Di Biagioc66f16c2018-09-18 15:00:06 +0000642
Andrea Di Biagioa9c15c12018-09-19 15:57:45 +0000643 if (IsDepBreaking) {
644 // A mask of all zeroes means: explicit input operands are not
645 // independent.
646 if (Mask.isNullValue()) {
647 if (!RD.isImplicitRead())
Andrea Di Biagioa64e4ae2018-10-25 17:03:51 +0000648 RS.setIndependentFromDef();
Andrea Di Biagioa9c15c12018-09-19 15:57:45 +0000649 } else {
650 // Check if this register operand is independent according to `Mask`.
651 // Note that Mask may not have enough bits to describe all explicit and
652 // implicit input operands. If this register operand doesn't have a
653 // corresponding bit in Mask, then conservatively assume that it is
654 // dependent.
655 if (Mask.getBitWidth() > RD.UseIndex) {
656 // Okay. This map describe register use `RD.UseIndex`.
657 if (Mask[RD.UseIndex])
Andrea Di Biagioa64e4ae2018-10-25 17:03:51 +0000658 RS.setIndependentFromDef();
Andrea Di Biagioa9c15c12018-09-19 15:57:45 +0000659 }
660 }
661 }
Andrea Di Biagiobce59212018-03-20 12:25:54 +0000662 }
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000663
Andrea Di Biagio9fc96b82018-06-20 10:08:11 +0000664 // Early exit if there are no writes.
665 if (D.Writes.empty())
Matt Davisfd195ce2018-08-13 18:11:48 +0000666 return std::move(NewIS);
Andrea Di Biagio9fc96b82018-06-20 10:08:11 +0000667
668 // Track register writes that implicitly clear the upper portion of the
669 // underlying super-registers using an APInt.
670 APInt WriteMask(D.Writes.size(), 0);
671
672 // Now query the MCInstrAnalysis object to obtain information about which
673 // register writes implicitly clear the upper portion of a super-register.
Andrea Di Biagio4ed822d2018-12-17 14:00:37 +0000674 if (MCIA)
675 MCIA->clearsSuperRegisters(MRI, MCI, WriteMask);
Andrea Di Biagio9fc96b82018-06-20 10:08:11 +0000676
Andrea Di Biagio81554792018-04-25 09:38:58 +0000677 // Initialize writes.
Andrea Di Biagio9fc96b82018-06-20 10:08:11 +0000678 unsigned WriteIndex = 0;
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000679 for (const WriteDescriptor &WD : D.Writes) {
Andrea Di Biagiod77ac0d2018-07-09 12:30:55 +0000680 unsigned RegID = WD.isImplicitWrite() ? WD.RegisterID
681 : MCI.getOperand(WD.OpIndex).getReg();
Andrea Di Biagio280f00a2018-03-22 10:19:20 +0000682 // Check if this is a optional definition that references NoReg.
Andrea Di Biagio9fc96b82018-06-20 10:08:11 +0000683 if (WD.IsOptionalDef && !RegID) {
684 ++WriteIndex;
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000685 continue;
Andrea Di Biagio9fc96b82018-06-20 10:08:11 +0000686 }
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000687
Andrea Di Biagio280f00a2018-03-22 10:19:20 +0000688 assert(RegID && "Expected a valid register ID!");
Andrea Di Biagio464205d2018-11-23 20:26:57 +0000689 NewIS->getDefs().emplace_back(WD, RegID,
690 /* ClearsSuperRegs */ WriteMask[WriteIndex],
691 /* WritesZero */ IsZeroIdiom);
Andrea Di Biagio9fc96b82018-06-20 10:08:11 +0000692 ++WriteIndex;
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000693 }
694
Matt Davisfd195ce2018-08-13 18:11:48 +0000695 return std::move(NewIS);
Andrea Di Biagio29b29cc2018-03-08 13:05:02 +0000696}
} // namespace mca
} // namespace llvm