//===- AArch64FrameLowering.cpp - AArch64 Frame Information ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//
#include "AArch64.h"
#include "AArch64FrameLowering.h"
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MachineLocation.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
void AArch64FrameLowering::splitSPAdjustments(uint64_t Total,
uint64_t &Initial,
uint64_t &Residual) const {
// 0x1f0 here is a pessimistic (i.e. realistic) boundary: x-register LDP
// instructions have a 7-bit signed immediate scaled by 8, giving a reach of
// 0x1f8, but stack adjustment should always be a multiple of 16.
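// For example (illustrative values): Total = 0x3f0 gives Initial = 0x1f0
// and Residual = 0x200, while Total = 0x100 gives Initial = 0x100 and
// Residual = 0.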
if (Total <= 0x1f0) {
Initial = Total;
Residual = 0;
} else {
Initial = 0x1f0;
Residual = Total - Initial;
}
}
void AArch64FrameLowering::emitPrologue(MachineFunction &MF) const {
AArch64MachineFunctionInfo *FuncInfo =
MF.getInfo<AArch64MachineFunctionInfo>();
MachineBasicBlock &MBB = MF.front();
MachineBasicBlock::iterator MBBI = MBB.begin();
MachineFrameInfo *MFI = MF.getFrameInfo();
const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
MachineModuleInfo &MMI = MF.getMMI();
const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
bool NeedsFrameMoves = MMI.hasDebugInfo()
|| MF.getFunction()->needsUnwindTableEntry();
uint64_t NumInitialBytes, NumResidualBytes;
// Currently we expect the stack to be laid out by
// sub sp, sp, #initial
// stp x29, x30, [sp, #offset]
// ...
// str xxx, [sp, #offset]
// sub sp, sp, #rest (possibly via extra instructions).
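// For example (illustrative numbers), a 0x3f0-byte frame with callee-saved
// registers would split as Initial = 0x1f0 and Residual = 0x200:
// sub sp, sp, #0x1f0
// stp x29, x30, [sp, #offset]
// ...
// sub sp, sp, #0x200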
if (MFI->getCalleeSavedInfo().size()) {
// If there are callee-saved registers, we want to store them efficiently as
// a block, and virtual base assignment happens too early to do it for us, so
// we adjust the stack in two phases: first just for callee-saved fiddling,
// then to allocate the rest of the frame.
splitSPAdjustments(MFI->getStackSize(), NumInitialBytes, NumResidualBytes);
} else {
// If there aren't any callee-saved registers, a two-phase adjustment is
// inefficient, so we allocate the whole frame with NumInitialBytes. This is
// also needed for correctness: in a "callee pops argument space" situation,
// that pop must be tacked onto the initial adjustment.
NumInitialBytes = MFI->getStackSize();
NumResidualBytes = 0;
}
// Tell everyone else how much adjustment we're expecting them to use. In
// particular, if an adjustment is required for a tail call, the epilogue could
// have a different view of things.
FuncInfo->setInitialStackAdjust(NumInitialBytes);
emitSPUpdate(MBB, MBBI, DL, TII, AArch64::X16, -NumInitialBytes,
MachineInstr::FrameSetup);
if (NeedsFrameMoves && NumInitialBytes) {
// We emit this update even if the CFA is set from a frame pointer later so
// that the CFA is valid in the interim.
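// N.b. at the time of writing MCCFIInstruction::createDefCfa negates the
// offset it is given, so passing -NumInitialBytes records the rule
// CFA = SP + NumInitialBytes.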
MCSymbol *SPLabel = MMI.getContext().CreateTempSymbol();
BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::PROLOG_LABEL))
.addSym(SPLabel);
unsigned Reg = MRI->getDwarfRegNum(AArch64::XSP, true);
MMI.addFrameInst(
MCCFIInstruction::createDefCfa(SPLabel, Reg, -NumInitialBytes));
}
// Otherwise we need to set the frame pointer and/or add a second stack
// adjustment.
bool FPNeedsSetting = hasFP(MF);
for (; MBBI != MBB.end(); ++MBBI) {
// Note that this search makes strong assumptions about the operation used
// to store the frame-pointer: it must be "STP x29, x30, ...". This could
// change in the future, but until then there's no point in implementing
// more generic, untestable cases.
if (FPNeedsSetting && MBBI->getOpcode() == AArch64::LSPair64_STR
&& MBBI->getOperand(0).getReg() == AArch64::X29) {
int64_t X29FrameIdx = MBBI->getOperand(2).getIndex();
FuncInfo->setFramePointerOffset(MFI->getObjectOffset(X29FrameIdx));
++MBBI;
emitRegUpdate(MBB, MBBI, DL, TII, AArch64::X29, AArch64::XSP,
AArch64::X29,
NumInitialBytes + MFI->getObjectOffset(X29FrameIdx),
MachineInstr::FrameSetup);
// The offset adjustment used when emitting debugging locations relative
// to whatever frame base is set. AArch64 uses the default frame base (FP
// or SP) and this adjusts the calculations to be correct.
MFI->setOffsetAdjustment(-MFI->getObjectOffset(X29FrameIdx)
- MFI->getStackSize());
if (NeedsFrameMoves) {
MCSymbol *FPLabel = MMI.getContext().CreateTempSymbol();
BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::PROLOG_LABEL))
.addSym(FPLabel);
unsigned Reg = MRI->getDwarfRegNum(AArch64::X29, true);
int Offset = MFI->getObjectOffset(X29FrameIdx);
MMI.addFrameInst(MCCFIInstruction::createDefCfa(FPLabel, Reg, Offset));
}
FPNeedsSetting = false;
}
if (!MBBI->getFlag(MachineInstr::FrameSetup))
break;
}
assert(!FPNeedsSetting && "Frame pointer couldn't be set");
emitSPUpdate(MBB, MBBI, DL, TII, AArch64::X16, -NumResidualBytes,
MachineInstr::FrameSetup);
// Now we emit the rest of the frame setup information, if necessary: we've
// already noted the FP and initial SP moves so we're left with the prologue's
// final SP update and callee-saved register locations.
if (!NeedsFrameMoves)
return;
// The label may be reused below, so create it in this outer scope.
MCSymbol *CSLabel = 0;
// The rest of the stack adjustment
if (!hasFP(MF) && NumResidualBytes) {
CSLabel = MMI.getContext().CreateTempSymbol();
BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::PROLOG_LABEL))
.addSym(CSLabel);
unsigned Reg = MRI->getDwarfRegNum(AArch64::XSP, true);
unsigned Offset = NumResidualBytes + NumInitialBytes;
MMI.addFrameInst(MCCFIInstruction::createDefCfa(CSLabel, Reg, -Offset));
}
// And any callee-saved registers (it's fine to leave them to the end here,
// because the old values are still valid at this point).
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
if (CSI.size()) {
if (!CSLabel) {
CSLabel = MMI.getContext().CreateTempSymbol();
BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::PROLOG_LABEL))
.addSym(CSLabel);
}
for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
E = CSI.end(); I != E; ++I) {
int Offset = MFI->getObjectOffset(I->getFrameIdx());
unsigned Reg = MRI->getDwarfRegNum(I->getReg(), true);
MMI.addFrameInst(MCCFIInstruction::createOffset(CSLabel, Reg, Offset));
}
}
}
void
AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
AArch64MachineFunctionInfo *FuncInfo =
MF.getInfo<AArch64MachineFunctionInfo>();
MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
DebugLoc DL = MBBI->getDebugLoc();
const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
MachineFrameInfo &MFI = *MF.getFrameInfo();
unsigned RetOpcode = MBBI->getOpcode();
// Initial and residual are named for consistency with the prologue. Note that
// in the epilogue, the residual adjustment is executed first.
uint64_t NumInitialBytes = FuncInfo->getInitialStackAdjust();
uint64_t NumResidualBytes = MFI.getStackSize() - NumInitialBytes;
uint64_t ArgumentPopSize = 0;
if (RetOpcode == AArch64::TC_RETURNdi ||
RetOpcode == AArch64::TC_RETURNxi) {
MachineOperand &JumpTarget = MBBI->getOperand(0);
MachineOperand &StackAdjust = MBBI->getOperand(1);
MachineInstrBuilder MIB;
if (RetOpcode == AArch64::TC_RETURNdi) {
MIB = BuildMI(MBB, MBBI, DL, TII.get(AArch64::TAIL_Bimm));
if (JumpTarget.isGlobal()) {
MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
JumpTarget.getTargetFlags());
} else {
assert(JumpTarget.isSymbol() && "unexpected tail call destination");
MIB.addExternalSymbol(JumpTarget.getSymbolName(),
JumpTarget.getTargetFlags());
}
} else {
assert(RetOpcode == AArch64::TC_RETURNxi && JumpTarget.isReg()
&& "Unexpected tail call");
MIB = BuildMI(MBB, MBBI, DL, TII.get(AArch64::TAIL_BRx));
MIB.addReg(JumpTarget.getReg(), RegState::Kill);
}
// Add the extra operands onto the new tail call instruction even though
// they're not used directly (so that liveness is tracked properly etc).
for (unsigned i = 2, e = MBBI->getNumOperands(); i != e; ++i)
MIB->addOperand(MBBI->getOperand(i));
// Delete the pseudo instruction TC_RETURN.
MachineInstr *NewMI = std::prev(MBBI);
MBB.erase(MBBI);
MBBI = NewMI;
// For a tail-call in a callee-pops-arguments environment, some or all of
// the stack may actually be in use for the call's arguments; this is
// calculated during LowerCall and consumed here...
ArgumentPopSize = StackAdjust.getImm();
} else {
// ... otherwise the amount to pop is *all* of the argument space,
// conveniently stored in the MachineFunctionInfo by
// LowerFormalArguments. This will, of course, be zero for the C calling
// convention.
ArgumentPopSize = FuncInfo->getArgumentStackToRestore();
}
assert(NumInitialBytes % 16 == 0 && NumResidualBytes % 16 == 0
&& "refusing to adjust stack by misaligned amt");
// We may need to address callee-saved registers differently, so find out the
// bound on the frame indices.
const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
int MinCSFI = 0;
int MaxCSFI = -1;
if (CSI.size()) {
MinCSFI = CSI[0].getFrameIdx();
MaxCSFI = CSI[CSI.size() - 1].getFrameIdx();
}
// The "residual" stack update comes first from this direction and guarantees
// that SP is NumInitialBytes below its value on function entry, either by a
// direct update or restoring it from the frame pointer.
if (NumInitialBytes + ArgumentPopSize != 0) {
emitSPUpdate(MBB, MBBI, DL, TII, AArch64::X16,
NumInitialBytes + ArgumentPopSize);
--MBBI;
}
// MBBI now points to the instruction just past the last callee-saved
// restoration (either RET/B if NumInitialBytes == 0, or the "ADD sp, sp"
// otherwise).
// Now we need to find out where to put the bulk of the stack adjustment
MachineBasicBlock::iterator FirstEpilogue = MBBI;
while (MBBI != MBB.begin()) {
--MBBI;
unsigned FrameOp;
for (FrameOp = 0; FrameOp < MBBI->getNumOperands(); ++FrameOp) {
if (MBBI->getOperand(FrameOp).isFI())
break;
}
// If this instruction doesn't have a frame index we've reached the end of
// the callee-save restoration.
if (FrameOp == MBBI->getNumOperands())
break;
// Likewise if it *is* a local reference, but not to a callee-saved object.
int FrameIdx = MBBI->getOperand(FrameOp).getIndex();
if (FrameIdx < MinCSFI || FrameIdx > MaxCSFI)
break;
FirstEpilogue = MBBI;
}
if (MF.getFrameInfo()->hasVarSizedObjects()) {
int64_t StaticFrameBase =
-(NumInitialBytes + FuncInfo->getFramePointerOffset());
emitRegUpdate(MBB, FirstEpilogue, DL, TII,
AArch64::XSP, AArch64::X29, AArch64::NoRegister,
StaticFrameBase);
} else {
emitSPUpdate(MBB, FirstEpilogue, DL, TII, AArch64::X16, NumResidualBytes);
}
}
int64_t
AArch64FrameLowering::resolveFrameIndexReference(MachineFunction &MF,
int FrameIndex,
unsigned &FrameReg,
int SPAdj,
bool IsCalleeSaveOp) const {
AArch64MachineFunctionInfo *FuncInfo =
MF.getInfo<AArch64MachineFunctionInfo>();
MachineFrameInfo *MFI = MF.getFrameInfo();
int64_t TopOfFrameOffset = MFI->getObjectOffset(FrameIndex);
assert(!(IsCalleeSaveOp && FuncInfo->getInitialStackAdjust() == 0)
&& "callee-saved register in unexpected place");
// If the frame for this function is particularly large, we adjust the stack
// in two phases which means the callee-save related operations see a
// different (intermediate) stack size.
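// FrameRegPos is the frame register's position relative to the same datum
// as the object offsets (the top of the frame). For example (illustrative):
// with a 64-byte frame addressed from SP and an object at offset -24,
// FrameRegPos = -64 and the result is -24 - (-64) = 40, i.e. [sp, #40].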
int64_t FrameRegPos;
if (IsCalleeSaveOp) {
FrameReg = AArch64::XSP;
FrameRegPos = -static_cast<int64_t>(FuncInfo->getInitialStackAdjust());
} else if (useFPForAddressing(MF)) {
// Have to use the frame pointer since we have no idea where SP is.
FrameReg = AArch64::X29;
FrameRegPos = FuncInfo->getFramePointerOffset();
} else {
FrameReg = AArch64::XSP;
FrameRegPos = -static_cast<int64_t>(MFI->getStackSize()) + SPAdj;
}
return TopOfFrameOffset - FrameRegPos;
}
void
AArch64FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
RegScavenger *RS) const {
const AArch64RegisterInfo *RegInfo =
static_cast<const AArch64RegisterInfo *>(MF.getTarget().getRegisterInfo());
MachineFrameInfo *MFI = MF.getFrameInfo();
const AArch64InstrInfo &TII =
*static_cast<const AArch64InstrInfo *>(MF.getTarget().getInstrInfo());
if (hasFP(MF)) {
MF.getRegInfo().setPhysRegUsed(AArch64::X29);
MF.getRegInfo().setPhysRegUsed(AArch64::X30);
}
// If addressing of local variables is going to be more complicated than
// shoving a base register and an offset into the instruction then we may well
// need to scavenge registers. We should either specifically add a
// callee-saved register for this purpose or allocate an extra spill slot.
bool BigStack =
MFI->estimateStackSize(MF) >= TII.estimateRSStackLimit(MF)
|| MFI->hasVarSizedObjects() // Access will be from X29: messes things up
|| (MFI->adjustsStack() && !hasReservedCallFrame(MF));
if (!BigStack)
return;
// We certainly need some slack space for the scavenger, preferably an extra
// register.
const uint16_t *CSRegs = RegInfo->getCalleeSavedRegs();
uint16_t ExtraReg = AArch64::NoRegister;
for (unsigned i = 0; CSRegs[i]; ++i) {
if (AArch64::GPR64RegClass.contains(CSRegs[i]) &&
!MF.getRegInfo().isPhysRegUsed(CSRegs[i])) {
ExtraReg = CSRegs[i];
break;
}
}
if (ExtraReg != AArch64::NoRegister) {
MF.getRegInfo().setPhysRegUsed(ExtraReg);
} else {
assert(RS && "Expect register scavenger to be available");
// Create a stack slot for scavenging purposes. PrologEpilogInserter
// helpfully places it near either SP or FP for us to avoid an infinite
// regress during scavenging.
const TargetRegisterClass *RC = &AArch64::GPR64RegClass;
RS->addScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
RC->getAlignment(),
false));
}
}
bool AArch64FrameLowering::determinePrologueDeath(MachineBasicBlock &MBB,
unsigned Reg) const {
// If @llvm.returnaddress is called then it will refer to X30 by some means;
// the prologue store does not kill the register.
if (Reg == AArch64::X30) {
if (MBB.getParent()->getFrameInfo()->isReturnAddressTaken()
&& MBB.getParent()->getRegInfo().isLiveIn(Reg))
return false;
}
// In all other cases, physical registers are dead after they've been saved
// but live at the beginning of the prologue block.
MBB.addLiveIn(Reg);
return true;
}
void
AArch64FrameLowering::emitFrameMemOps(bool isPrologue, MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
const std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI,
const LoadStoreMethod PossClasses[],
unsigned NumClasses) const {
DebugLoc DL = MBB.findDebugLoc(MBBI);
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo &MFI = *MF.getFrameInfo();
const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
// A certain amount of implicit contract is present here. The actual stack
// offsets haven't been allocated officially yet, so for strictly correct code
// we rely on the fact that the elements of CSI are allocated in order
// starting at SP, purely as dictated by size and alignment. In practice since
// this function handles the only accesses to those slots it's not quite so
// important.
//
// We have also ordered the Callee-saved register list in AArch64CallingConv
// so that the above scheme puts registers in order: in particular we want
// &X30 to be &X29+8 for an ABI-correct frame record (PCS 5.2.2)
for (unsigned i = 0, e = CSI.size(); i < e; ++i) {
unsigned Reg = CSI[i].getReg();
// First we need to find out which register class the register belongs to so
// that we can use the correct load/store instructions.
unsigned ClassIdx;
for (ClassIdx = 0; ClassIdx < NumClasses; ++ClassIdx) {
if (PossClasses[ClassIdx].RegClass->contains(Reg))
break;
}
assert(ClassIdx != NumClasses
&& "Asked to store register in unexpected class");
const TargetRegisterClass &TheClass = *PossClasses[ClassIdx].RegClass;
// Now we need to decide whether it's possible to emit a paired instruction:
// for this we want the next register to be in the same class.
MachineInstrBuilder NewMI;
bool Pair = false;
if (i + 1 < CSI.size() && TheClass.contains(CSI[i+1].getReg())) {
Pair = true;
unsigned StLow = 0, StHigh = 0;
if (isPrologue) {
// Most of these registers will be live-in to the MBB and killed by our
// store, though there are exceptions (see determinePrologueDeath).
StLow = getKillRegState(determinePrologueDeath(MBB, CSI[i+1].getReg()));
StHigh = getKillRegState(determinePrologueDeath(MBB, CSI[i].getReg()));
} else {
StLow = RegState::Define;
StHigh = RegState::Define;
}
NewMI = BuildMI(MBB, MBBI, DL, TII.get(PossClasses[ClassIdx].PairOpcode))
.addReg(CSI[i+1].getReg(), StLow)
.addReg(CSI[i].getReg(), StHigh);
// If it's a paired op, we've consumed two registers
++i;
} else {
unsigned State;
if (isPrologue) {
State = getKillRegState(determinePrologueDeath(MBB, CSI[i].getReg()));
} else {
State = RegState::Define;
}
NewMI = BuildMI(MBB, MBBI, DL,
TII.get(PossClasses[ClassIdx].SingleOpcode))
.addReg(CSI[i].getReg(), State);
}
// Note that the FrameIdx refers to the second register in a pair: it will
// be allocated the smaller numeric address and so is the one an LDP/STP
// address must use.
int FrameIdx = CSI[i].getFrameIdx();
MachineMemOperand::MemOperandFlags Flags;
Flags = isPrologue ? MachineMemOperand::MOStore : MachineMemOperand::MOLoad;
MachineMemOperand *MMO =
MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
Flags,
Pair ? TheClass.getSize() * 2 : TheClass.getSize(),
MFI.getObjectAlignment(FrameIdx));
NewMI.addFrameIndex(FrameIdx)
.addImm(0) // address-register offset
.addMemOperand(MMO);
if (isPrologue)
NewMI.setMIFlags(MachineInstr::FrameSetup);
// For aesthetic reasons, during an epilogue we want to emit complementary
// operations to the prologue, but in the opposite order. So we still
// iterate through the CalleeSavedInfo list in order, but we put the
// instructions successively earlier in the MBB.
if (!isPrologue)
--MBBI;
}
}
bool
AArch64FrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
const std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI) const {
if (CSI.empty())
return false;
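// Each LoadStoreMethod entry pairs a register class with the opcodes used
// for paired and single stores of registers in that class.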
static const LoadStoreMethod PossibleClasses[] = {
{&AArch64::GPR64RegClass, AArch64::LSPair64_STR, AArch64::LS64_STR},
{&AArch64::FPR64RegClass, AArch64::LSFPPair64_STR, AArch64::LSFP64_STR},
};
const unsigned NumClasses = llvm::array_lengthof(PossibleClasses);
emitFrameMemOps(/* isPrologue = */ true, MBB, MBBI, CSI, TRI,
PossibleClasses, NumClasses);
return true;
}
bool
AArch64FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
const std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI) const {
if (CSI.empty())
return false;
static const LoadStoreMethod PossibleClasses[] = {
{&AArch64::GPR64RegClass, AArch64::LSPair64_LDR, AArch64::LS64_LDR},
{&AArch64::FPR64RegClass, AArch64::LSFPPair64_LDR, AArch64::LSFP64_LDR},
};
const unsigned NumClasses = llvm::array_lengthof(PossibleClasses);
emitFrameMemOps(/* isPrologue = */ false, MBB, MBBI, CSI, TRI,
PossibleClasses, NumClasses);
return true;
}
bool
AArch64FrameLowering::hasFP(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
const TargetRegisterInfo *RI = MF.getTarget().getRegisterInfo();
// This is a decision of ABI compliance. The AArch64 PCS gives various options
// for conformance, and even at the most stringent level more or less permits
// elimination for leaf functions because there's no loss of functionality
// (for debugging etc.).
if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI->hasCalls())
return true;
// The following are hard-limits: incorrect code will be generated if we try
// to omit the frame.
return (RI->needsStackRealignment(MF) ||
MFI->hasVarSizedObjects() ||
MFI->isFrameAddressTaken());
}
bool
AArch64FrameLowering::useFPForAddressing(const MachineFunction &MF) const {
return MF.getFrameInfo()->hasVarSizedObjects();
}
bool
AArch64FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
// Of the various reasons for having a frame pointer, it's actually only
// variable-sized objects that prevent reservation of a call frame.
return !(hasFP(MF) && MFI->hasVarSizedObjects());
}
void
AArch64FrameLowering::eliminateCallFramePseudoInstr(
MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const {
const AArch64InstrInfo &TII =
*static_cast<const AArch64InstrInfo *>(MF.getTarget().getInstrInfo());
DebugLoc dl = MI->getDebugLoc();
int Opcode = MI->getOpcode();
bool IsDestroy = Opcode == TII.getCallFrameDestroyOpcode();
uint64_t CalleePopAmount = IsDestroy ? MI->getOperand(1).getImm() : 0;
if (!hasReservedCallFrame(MF)) {
unsigned Align = getStackAlignment();
int64_t Amount = MI->getOperand(0).getImm();
Amount = RoundUpToAlignment(Amount, Align);
if (!IsDestroy) Amount = -Amount;
// N.b. if CalleePopAmount is valid but zero (i.e. callee would pop, but it
// doesn't have to pop anything), then the first operand will be zero too so
// this adjustment is a no-op.
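// For example (illustrative): a call frame setup pseudo with an immediate
// of 32 becomes "sub sp, sp, #32" here, and the matching destroy pseudo
// becomes "add sp, sp, #32".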
if (CalleePopAmount == 0) {
// FIXME: in-function stack adjustment for calls is limited to 12-bits
// because there's no guaranteed temporary register available. Mostly call
// frames will be allocated at the start of a function so this is OK, but
// it is a limitation that needs dealing with.
assert(Amount > -0xfff && Amount < 0xfff && "call frame too large");
emitSPUpdate(MBB, MI, dl, TII, AArch64::NoRegister, Amount);
}
} else if (CalleePopAmount != 0) {
// If the calling convention demands that the callee pops arguments from the
// stack, we want to add it back if we have a reserved call frame.
assert(CalleePopAmount < 0xfff && "call frame too large");
emitSPUpdate(MBB, MI, dl, TII, AArch64::NoRegister, -CalleePopAmount);
}
MBB.erase(MI);
}