GitHub Repository: freebsd/freebsd-src
Path: blob/main/contrib/llvm-project/llvm/lib/Target/Sparc/SparcFrameLowering.cpp
//===-- SparcFrameLowering.cpp - Sparc Frame Information ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the Sparc implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#include "SparcFrameLowering.h"
#include "SparcInstrInfo.h"
#include "SparcMachineFunctionInfo.h"
#include "SparcSubtarget.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

static cl::opt<bool>
DisableLeafProc("disable-sparc-leaf-proc",
                cl::init(false),
                cl::desc("Disable Sparc leaf procedure optimization."),
                cl::Hidden);

SparcFrameLowering::SparcFrameLowering(const SparcSubtarget &ST)
    : TargetFrameLowering(TargetFrameLowering::StackGrowsDown,
                          ST.is64Bit() ? Align(16) : Align(8), 0,
                          ST.is64Bit() ? Align(16) : Align(8)) {}

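// Adjust the stack pointer (%o6) by NumBytes. The caller passes in the
// register/register and register/immediate opcodes to use, so the same helper
// serves both a plain ADD (epilogue, call-frame adjustment) and a SAVE
// (prologue, where frame allocation and the register-window switch happen in
// one instruction). Adjustments that fit SPARC's signed 13-bit immediate are
// emitted directly; anything larger is first materialized in %g1.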
void SparcFrameLowering::emitSPAdjustment(MachineFunction &MF,
                                          MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator MBBI,
                                          int NumBytes,
                                          unsigned ADDrr,
                                          unsigned ADDri) const {

  DebugLoc dl;
  const SparcInstrInfo &TII =
      *static_cast<const SparcInstrInfo *>(MF.getSubtarget().getInstrInfo());

  if (NumBytes >= -4096 && NumBytes < 4096) {
    BuildMI(MBB, MBBI, dl, TII.get(ADDri), SP::O6)
      .addReg(SP::O6).addImm(NumBytes);
    return;
  }

  // Emit this the hard way. This clobbers G1 which we always know is
  // available here.
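  // For illustration, an out-of-range adjustment such as NumBytes = 8192
  // (a value chosen purely as an example) would be lowered roughly as:
  //   sethi %hi(8192), %g1      ; %g1 = upper 22 bits, i.e. 8192
  //   or    %g1, %lo(8192), %g1 ; fill in the low 10 bits (0 here)
  //   add   %sp, %g1, %sp
  // Negative values take the sethi+xor path below: the %lox() immediate is
  // sign-extended, and xor-ing it into the %hix() pattern flips the
  // complemented upper bits back while supplying the low bits, which a plain
  // or of a sign-extended immediate could not do.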
  if (NumBytes >= 0) {
    // Emit nonnegative numbers with sethi + or.
    // sethi %hi(NumBytes), %g1
    // or %g1, %lo(NumBytes), %g1
    // add %sp, %g1, %sp
    BuildMI(MBB, MBBI, dl, TII.get(SP::SETHIi), SP::G1)
      .addImm(HI22(NumBytes));
    BuildMI(MBB, MBBI, dl, TII.get(SP::ORri), SP::G1)
      .addReg(SP::G1).addImm(LO10(NumBytes));
    BuildMI(MBB, MBBI, dl, TII.get(ADDrr), SP::O6)
      .addReg(SP::O6).addReg(SP::G1);
    return;
  }

  // Emit negative numbers with sethi + xor.
  // sethi %hix(NumBytes), %g1
  // xor %g1, %lox(NumBytes), %g1
  // add %sp, %g1, %sp
  BuildMI(MBB, MBBI, dl, TII.get(SP::SETHIi), SP::G1)
    .addImm(HIX22(NumBytes));
  BuildMI(MBB, MBBI, dl, TII.get(SP::XORri), SP::G1)
    .addReg(SP::G1).addImm(LOX10(NumBytes));
  BuildMI(MBB, MBBI, dl, TII.get(ADDrr), SP::O6)
    .addReg(SP::O6).addReg(SP::G1);
}

void SparcFrameLowering::emitPrologue(MachineFunction &MF,
                                      MachineBasicBlock &MBB) const {
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();

  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const SparcSubtarget &Subtarget = MF.getSubtarget<SparcSubtarget>();
  const SparcInstrInfo &TII =
      *static_cast<const SparcInstrInfo *>(Subtarget.getInstrInfo());
  const SparcRegisterInfo &RegInfo =
      *static_cast<const SparcRegisterInfo *>(Subtarget.getRegisterInfo());
  MachineBasicBlock::iterator MBBI = MBB.begin();
  // Debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc dl;
  bool NeedsStackRealignment = RegInfo.shouldRealignStack(MF);

  if (NeedsStackRealignment && !RegInfo.canRealignStack(MF))
    report_fatal_error("Function \"" + Twine(MF.getName()) + "\" required "
                       "stack re-alignment, but LLVM couldn't handle it "
                       "(probably because it has a dynamic alloca).");

  // Get the number of bytes to allocate from the FrameInfo
  int NumBytes = (int) MFI.getStackSize();

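  // Non-leaf functions allocate the frame with SAVE, which also rotates the
  // register window. Leaf procedures keep running in the caller's window, so
  // they only need a plain ADD on %sp (and nothing at all if the frame is
  // empty).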
  unsigned SAVEri = SP::SAVEri;
  unsigned SAVErr = SP::SAVErr;
  if (FuncInfo->isLeafProc()) {
    if (NumBytes == 0)
      return;
    SAVEri = SP::ADDri;
    SAVErr = SP::ADDrr;
  }

  // The SPARC ABI is a bit odd in that it requires a reserved 92-byte
  // (128 in v9) area in the user's stack, starting at %sp. Thus, the
  // first part of the stack that can actually be used is located at
  // %sp + 92.
  //
  // We therefore need to add that offset to the total stack size
  // after all the stack objects are placed by
  // PrologEpilogInserter calculateFrameObjectOffsets. However, since the stack needs to be
  // aligned *after* the extra size is added, we need to disable
  // calculateFrameObjectOffsets's built-in stack alignment, by having
  // targetHandlesStackFrameRounding return true.

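  // Illustrative example (numbers chosen only for exposition): a 32-bit
  // function with 40 bytes of local objects ends up with roughly
  //   40 (locals) + 92 (reserved ABI area) = 132, rounded up to 136
  // to keep the required 8-byte stack alignment, assuming no outgoing call
  // frame gets added below.
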
  // Add the extra call frame stack size, if needed. (This is the same
  // code as in PrologEpilogInserter, but also gets disabled by
  // targetHandlesStackFrameRounding)
  if (MFI.adjustsStack() && hasReservedCallFrame(MF))
    NumBytes += MFI.getMaxCallFrameSize();

  // Adds the SPARC subtarget-specific spill area to the stack
  // size. Also ensures target-required alignment.
  NumBytes = Subtarget.getAdjustedFrameSize(NumBytes);

  // Finally, ensure that the size is sufficiently aligned for the
  // data on the stack.
  NumBytes = alignTo(NumBytes, MFI.getMaxAlign());

  // Update stack size with corrected value.
  MFI.setStackSize(NumBytes);

  emitSPAdjustment(MF, MBB, MBBI, -NumBytes, SAVErr, SAVEri);
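
  // After the SAVE emitted above, the CFA is computed relative to the frame
  // pointer and the return address has moved with the register window: in
  // SPARC DWARF numbering %i6/%fp is register 30, %o7 is 15, and %i7 is 31,
  // which is what the three CFI directives below record.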

  unsigned regFP = RegInfo.getDwarfRegNum(SP::I6, true);

  // Emit ".cfi_def_cfa_register 30".
  unsigned CFIIndex =
      MF.addFrameInst(MCCFIInstruction::createDefCfaRegister(nullptr, regFP));
  BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);

  // Emit ".cfi_window_save".
  CFIIndex = MF.addFrameInst(MCCFIInstruction::createWindowSave(nullptr));
  BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);

  unsigned regInRA = RegInfo.getDwarfRegNum(SP::I7, true);
  unsigned regOutRA = RegInfo.getDwarfRegNum(SP::O7, true);
  // Emit ".cfi_register 15, 31".
  CFIIndex = MF.addFrameInst(
      MCCFIInstruction::createRegister(nullptr, regOutRA, regInRA));
  BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);

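  // Dynamic stack realignment: on 64-bit SPARC the V9 ABI biases %sp by 2047
  // bytes, so the pointer has to be un-biased before it can be masked down to
  // the requested alignment with ANDN, and re-biased afterwards. On 32-bit
  // targets the bias is zero and %sp is masked in place.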
  if (NeedsStackRealignment) {
    int64_t Bias = Subtarget.getStackPointerBias();
    unsigned regUnbiased;
    if (Bias) {
      // This clobbers G1 which we always know is available here.
      regUnbiased = SP::G1;
      // add %o6, BIAS, %g1
      BuildMI(MBB, MBBI, dl, TII.get(SP::ADDri), regUnbiased)
        .addReg(SP::O6).addImm(Bias);
    } else
      regUnbiased = SP::O6;

    // andn %regUnbiased, MaxAlign-1, %regUnbiased
    Align MaxAlign = MFI.getMaxAlign();
    BuildMI(MBB, MBBI, dl, TII.get(SP::ANDNri), regUnbiased)
        .addReg(regUnbiased)
        .addImm(MaxAlign.value() - 1U);

    if (Bias) {
      // add %g1, -BIAS, %o6
      BuildMI(MBB, MBBI, dl, TII.get(SP::ADDri), SP::O6)
        .addReg(regUnbiased).addImm(-Bias);
    }
  }
}

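// Replace the ADJCALLSTACKDOWN / ADJCALLSTACKUP pseudo instructions that
// bracket each call. When the call frame is reserved up front (no
// variable-sized objects), the pseudos are simply removed; otherwise %sp is
// bumped down before the call and back up afterwards.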
MachineBasicBlock::iterator SparcFrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  if (!hasReservedCallFrame(MF)) {
    MachineInstr &MI = *I;
    int Size = MI.getOperand(0).getImm();
    if (MI.getOpcode() == SP::ADJCALLSTACKDOWN)
      Size = -Size;

    if (Size)
      emitSPAdjustment(MF, MBB, I, Size, SP::ADDrr, SP::ADDri);
  }
  return MBB.erase(I);
}

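// For a non-leaf function the whole frame is torn down by a single
// "restore %g0, %g0, %g0", which pops the register window and thereby also
// restores the caller's %sp. A leaf procedure never executed a SAVE, so it
// just adds the frame size back onto %sp.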
void SparcFrameLowering::emitEpilogue(MachineFunction &MF,
                                      MachineBasicBlock &MBB) const {
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  const SparcInstrInfo &TII =
      *static_cast<const SparcInstrInfo *>(MF.getSubtarget().getInstrInfo());
  DebugLoc dl = MBBI->getDebugLoc();
  assert((MBBI->getOpcode() == SP::RETL || MBBI->getOpcode() == SP::TAIL_CALL ||
          MBBI->getOpcode() == SP::TAIL_CALLri) &&
         "Can only put epilog before 'retl' or 'tail_call' instruction!");
  if (!FuncInfo->isLeafProc()) {
    BuildMI(MBB, MBBI, dl, TII.get(SP::RESTORErr), SP::G0).addReg(SP::G0)
      .addReg(SP::G0);
    return;
  }
  MachineFrameInfo &MFI = MF.getFrameInfo();

  int NumBytes = (int) MFI.getStackSize();
  if (NumBytes != 0)
    emitSPAdjustment(MF, MBB, MBBI, NumBytes, SP::ADDrr, SP::ADDri);

  // Preserve return address in %o7
  if (MBBI->getOpcode() == SP::TAIL_CALL) {
    MBB.addLiveIn(SP::O7);
    BuildMI(MBB, MBBI, dl, TII.get(SP::ORrr), SP::G1)
        .addReg(SP::G0)
        .addReg(SP::O7);
    BuildMI(MBB, MBBI, dl, TII.get(SP::ORrr), SP::O7)
        .addReg(SP::G0)
        .addReg(SP::G1);
  }
}

bool SparcFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  // Reserve call frame if there are no variable sized objects on the stack.
  return !MF.getFrameInfo().hasVarSizedObjects();
}

// hasFP - Return true if the specified function should have a dedicated frame
// pointer register. This is true if the function has variable sized allocas or
// if frame pointer elimination is disabled.
bool SparcFrameLowering::hasFP(const MachineFunction &MF) const {
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  return MF.getTarget().Options.DisableFramePointerElim(MF) ||
         RegInfo->hasStackRealignment(MF) || MFI.hasVarSizedObjects() ||
         MFI.isFrameAddressTaken();
}

StackOffset
SparcFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                           Register &FrameReg) const {
  const SparcSubtarget &Subtarget = MF.getSubtarget<SparcSubtarget>();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const SparcRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  const SparcMachineFunctionInfo *FuncInfo =
      MF.getInfo<SparcMachineFunctionInfo>();
  bool isFixed = MFI.isFixedObjectIndex(FI);

  // Addressable stack objects are accessed using neg. offsets from
  // %fp, or positive offsets from %sp.
  bool UseFP;

  // Sparc uses FP-based references in general, even when "hasFP" is
  // false. That function is rather a misnomer, because %fp is
  // actually always available, unless isLeafProc.
  if (FuncInfo->isLeafProc()) {
    // If there's a leaf proc, all offsets need to be %sp-based,
    // because we haven't caused %fp to actually point to our frame.
    UseFP = false;
  } else if (isFixed) {
    // Otherwise, argument access should always use %fp.
    UseFP = true;
  } else if (RegInfo->hasStackRealignment(MF)) {
    // If there is dynamic stack realignment, all local object
    // references need to be via %sp, to take account of the
    // re-alignment.
    UseFP = false;
  } else {
    // Finally, default to using %fp.
    UseFP = true;
  }

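  // The raw object offset is biased by the stack-pointer bias (2047 under the
  // 64-bit V9 ABI, 0 on 32-bit targets). %fp-based references can use that
  // offset directly; %sp-based references must also add the frame size, since
  // %sp was moved down by that amount in the prologue.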
  int64_t FrameOffset = MF.getFrameInfo().getObjectOffset(FI) +
      Subtarget.getStackPointerBias();

  if (UseFP) {
    FrameReg = RegInfo->getFrameRegister(MF);
    return StackOffset::getFixed(FrameOffset);
  } else {
    FrameReg = SP::O6; // %sp
    return StackOffset::getFixed(FrameOffset +
                                 MF.getFrameInfo().getStackSize());
  }
}

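// Debug check used below: after leaf-procedure remapping, no %i or %l
// register may remain in use, since a leaf procedure has no register window
// of its own.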
static bool LLVM_ATTRIBUTE_UNUSED verifyLeafProcRegUse(MachineRegisterInfo *MRI)
{

  for (unsigned reg = SP::I0; reg <= SP::I7; ++reg)
    if (MRI->isPhysRegUsed(reg))
      return false;

  for (unsigned reg = SP::L0; reg <= SP::L7; ++reg)
    if (MRI->isPhysRegUsed(reg))
      return false;

  return true;
}

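// A function qualifies as a leaf procedure if it can run entirely in its
// caller's register window: it makes no calls, contains no inline assembly,
// needs no frame pointer, does not reference %sp explicitly, and has not had
// %l0 allocated (which would indicate it needs more registers than the
// window-free convention provides).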
bool SparcFrameLowering::isLeafProc(MachineFunction &MF) const
{

  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  return !(MFI.hasCalls()               // has calls
           || MRI.isPhysRegUsed(SP::L0) // Too many registers needed
           || MRI.isPhysRegUsed(SP::O6) // %sp is used
           || hasFP(MF)                 // need %fp
           || MF.hasInlineAsm());       // has inline assembly
}

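// In a leaf procedure there is no SAVE, so the incoming arguments and the
// return address never move into the %i registers: they stay in %o0-%o5 and
// %o7. Rewrite every use of an %i register (and of the even/odd register
// pairs built on them) to the corresponding %o register, and fix up block
// live-in lists to match.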
void SparcFrameLowering::remapRegsForLeafProc(MachineFunction &MF) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  // Remap %i[0-7] to %o[0-7].
  for (unsigned reg = SP::I0; reg <= SP::I7; ++reg) {
    if (!MRI.isPhysRegUsed(reg))
      continue;

    unsigned mapped_reg = reg - SP::I0 + SP::O0;

    // Replace I register with O register.
    MRI.replaceRegWith(reg, mapped_reg);

    // Also replace register pair super-registers.
    if ((reg - SP::I0) % 2 == 0) {
      unsigned preg = (reg - SP::I0) / 2 + SP::I0_I1;
      unsigned mapped_preg = preg - SP::I0_I1 + SP::O0_O1;
      MRI.replaceRegWith(preg, mapped_preg);
    }
  }

  // Rewrite MBB's Live-ins.
  for (MachineBasicBlock &MBB : MF) {
    for (unsigned reg = SP::I0_I1; reg <= SP::I6_I7; ++reg) {
      if (!MBB.isLiveIn(reg))
        continue;
      MBB.removeLiveIn(reg);
      MBB.addLiveIn(reg - SP::I0_I1 + SP::O0_O1);
    }
    for (unsigned reg = SP::I0; reg <= SP::I7; ++reg) {
      if (!MBB.isLiveIn(reg))
        continue;
      MBB.removeLiveIn(reg);
      MBB.addLiveIn(reg - SP::I0 + SP::O0);
    }
  }

  assert(verifyLeafProcRegUse(&MRI));
#ifdef EXPENSIVE_CHECKS
  MF.verify(0, "After LeafProc Remapping");
#endif
}

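// Called by the PrologEpilogInserter once register allocation is complete;
// this is the point at which the leaf-procedure decision is made (unless
// disabled with -disable-sparc-leaf-proc) and the register remapping is
// applied.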
void SparcFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                              BitVector &SavedRegs,
                                              RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  if (!DisableLeafProc && isLeafProc(MF)) {
    SparcMachineFunctionInfo *MFI = MF.getInfo<SparcMachineFunctionInfo>();
    MFI->setLeafProc(true);

    remapRegsForLeafProc(MF);
  }
}