GitHub Repository: freebsd/freebsd-src
Path: blob/main/contrib/llvm-project/lld/MachO/ConcatOutputSection.cpp
//===- ConcatOutputSection.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ConcatOutputSection.h"
#include "Config.h"
#include "OutputSegment.h"
#include "SymbolTable.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "lld/Common/CommonLinkerContext.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/TimeProfiler.h"

using namespace llvm;
using namespace llvm::MachO;
using namespace lld;
using namespace lld::macho;

MapVector<NamePair, ConcatOutputSection *> macho::concatOutputSections;

void ConcatOutputSection::addInput(ConcatInputSection *input) {
  assert(input->parent == this);
  if (inputs.empty()) {
    align = input->align;
    flags = input->getFlags();
  } else {
    align = std::max(align, input->align);
    finalizeFlags(input);
  }
  inputs.push_back(input);
}

// Branch-range extension can be implemented in two ways, either through ...
//
// (1) Branch islands: Single branch instructions (also of limited range),
//     that might be chained in multiple hops to reach the desired
//     destination. On ARM64, as many as 16 branch islands are needed to hop
//     between opposite ends of a 2 GiB program. LD64 uses branch islands
//     exclusively, even when it needs excessive hops.
//
// (2) Thunks: Instruction(s) to load the destination address into a scratch
//     register, followed by a register-indirect branch. Thunks are
//     constructed to reach any arbitrary address, so need not be chained.
//     Although thunks need not be chained, a program might need multiple
//     thunks to the same destination distributed throughout a large program
//     so that all call sites can have one within range.
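//
//     For example, on arm64 a thunk is a short three-instruction stub along
//     these lines (illustrative):
//
//         adrp x16, _callee@page
//         add  x16, x16, _callee@pageoff
//         br   x16
//
//     which extends reach to roughly +/-4 GiB, far beyond the +/-128 MiB
//     range of a direct b/bl instruction.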
//
// The optimal approach is to mix islands for destinations within two hops,
// and use thunks for destinations at greater distance. For now, we only
// implement thunks. TODO: Add support for branch islands.
//
// Internally -- as expressed in LLD's data structures -- a
// branch-range-extension thunk consists of:
//
// (1) new Defined symbol for the thunk named
//     <FUNCTION>.thunk.<SEQUENCE>, which references ...
// (2) new InputSection, which contains ...
// (3.1) new data for the instructions to load & branch to the far address +
// (3.2) new Relocs on instructions to load the far address, which reference ...
// (4.1) existing Defined symbol for the real function in __text, or
// (4.2) existing DylibSymbol for the real function in a dylib
//
// Nearly-optimal thunk-placement algorithm features:
//
// * Single pass: O(n) on the number of call sites.
//
// * Accounts for the exact space overhead of thunks - no heuristics
//
// * Exploits the full range of call instructions - forward & backward
//
// Data:
//
// * DenseMap<Symbol *, ThunkInfo> thunkMap: Maps the function symbol
//   to its thunk bookkeeper.
//
// * struct ThunkInfo (bookkeeper): Call instructions have limited range, and
//   distant call sites might be unable to reach the same thunk, so multiple
//   thunks are necessary to serve all call sites in a very large program. A
//   thunkInfo stores state for all thunks associated with a particular
//   function:
//   (a) thunk symbol
//   (b) input section containing stub code, and
//   (c) sequence number for the active thunk incarnation.
//   When an old thunk goes out of range, we increment the sequence number and
//   create a new thunk named <FUNCTION>.thunk.<SEQUENCE>.
//
// * A thunk consists of
//   (a) a Defined symbol pointing to
//   (b) an InputSection holding machine code (similar to a MachO stub), and
//   (c) relocs referencing the real function for fixing up the stub code.
//
// * std::vector<InputSection *> TextOutputSection::thunks: A vector parallel
//   to the inputs vector. We store new thunks via cheap vector append, rather
//   than costly insertion into the inputs vector.
//
// Control Flow:
//
// * During address assignment, TextOutputSection::finalize() examines call
//   sites by ascending address and creates thunks. When a function is beyond
//   the range of a call site, we need a thunk. Place it at the largest
//   available forward address from the call site. Call sites increase
//   monotonically and thunks are always placed as far forward as possible;
//   thus, we place thunks at monotonically increasing addresses. Once a thunk
//   is placed, it and all previous input-section addresses are final.
//
// * TextOutputSection::finalize() and TextOutputSection::writeTo() merge
//   the inputs and thunks vectors (both ordered by ascending address), which
//   is simple and cheap.
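//
// Rough picture of the state while TextOutputSection::finalize() processes the
// call sites of inputs[callIdx] (illustrative):
//
//   | finalized isecs ... isec @ callIdx ... | un-finalized isecs | __stubs ...
//   ^ addr                ^ isecVA           ^ addr + size
//
// New thunks are finalized at addr + size, the current end of the
// assigned-address region, i.e., as far forward of the call site as possible.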

DenseMap<Symbol *, ThunkInfo> lld::macho::thunkMap;

// Determine whether we need thunks, which depends on the target arch -- RISC
// (i.e., ARM) generally does because it has limited-range branch/call
// instructions, whereas CISC (i.e., x86) generally doesn't. RISC only needs
// thunks for programs so large that branch source & destination addresses
// might differ more than the range of branch instruction(s).
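// (For example, arm64's b/bl instructions encode a signed 26-bit word offset,
// i.e., a range of roughly +/-128 MiB, so only a program whose __text plus
// stubs outgrows that range needs thunks; x86_64's call takes a 32-bit
// displacement and effectively never does.)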
bool TextOutputSection::needsThunks() const {
  if (!target->usesThunks())
    return false;
  uint64_t isecAddr = addr;
  for (ConcatInputSection *isec : inputs)
    isecAddr = alignToPowerOf2(isecAddr, isec->align) + isec->getSize();
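  // If all of __text plus __stubs fits within the smaller of the forward and
  // backward branch ranges, every call site can reach every callee (and every
  // stub) directly, so no thunks are needed.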
  if (isecAddr - addr + in.stubs->getSize() <=
      std::min(target->backwardBranchRange, target->forwardBranchRange))
    return false;
  // Yes, this program is large enough to need thunks.
  for (ConcatInputSection *isec : inputs) {
    for (Reloc &r : isec->relocs) {
      if (!target->hasAttr(r.type, RelocAttrBits::BRANCH))
        continue;
      auto *sym = r.referent.get<Symbol *>();
      // Pre-populate the thunkMap and memoize call site counts for every
      // InputSection and ThunkInfo. We do this for the benefit of
      // estimateStubsInRangeVA().
      ThunkInfo &thunkInfo = thunkMap[sym];
      // Knowing ThunkInfo call site count will help us know whether or not we
      // might need to create more for this referent at the time we are
      // estimating distance to __stubs in estimateStubsInRangeVA().
      ++thunkInfo.callSiteCount;
      // We can avoid work on InputSections that have no BRANCH relocs.
      isec->hasCallSites = true;
    }
  }
  return true;
}

// Since __stubs is placed after __text, we must estimate the address
// beyond which stubs are within range of a simple forward branch.
// This is called exactly once, when the last input section has been finalized.
uint64_t TextOutputSection::estimateStubsInRangeVA(size_t callIdx) const {
  // Tally the functions which still have call sites remaining to process,
  // which yields the maximum number of thunks we might yet place.
  size_t maxPotentialThunks = 0;
  for (auto &tp : thunkMap) {
    ThunkInfo &ti = tp.second;
    // This overcounts: Only sections that are in forward jump range from the
    // currently-active section get finalized, and all input sections are
    // finalized when estimateStubsInRangeVA() is called. So only backward
    // jumps will need thunks, but we count all jumps.
    if (ti.callSitesUsed < ti.callSiteCount)
      maxPotentialThunks += 1;
  }
  // Tally the total size of input sections remaining to process.
  uint64_t isecVA = inputs[callIdx]->getVA();
  uint64_t isecEnd = isecVA;
  for (size_t i = callIdx; i < inputs.size(); i++) {
    InputSection *isec = inputs[i];
    isecEnd = alignToPowerOf2(isecEnd, isec->align) + isec->getSize();
  }
  // Estimate the address after which call sites can safely call stubs
  // directly rather than through intermediary thunks.
  uint64_t forwardBranchRange = target->forwardBranchRange;
  assert(isecEnd > forwardBranchRange &&
         "should not run thunk insertion if all code fits in jump range");
  assert(isecEnd - isecVA <= forwardBranchRange &&
         "should only finalize sections in jump range");
  uint64_t stubsInRangeVA = isecEnd + maxPotentialThunks * target->thunkSize +
                            in.stubs->getSize() - forwardBranchRange;
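  // In the worst case, the end of __stubs lands at isecEnd plus every
  // potential thunk plus the stubs themselves; a call site at or beyond
  // stubsInRangeVA can still reach that worst-case end with a plain forward
  // branch, so it will never need a thunk to reach a stub.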
  log("thunks = " + std::to_string(thunkMap.size()) +
      ", potential = " + std::to_string(maxPotentialThunks) +
      ", stubs = " + std::to_string(in.stubs->getSize()) + ", isecVA = " +
      utohexstr(isecVA) + ", threshold = " + utohexstr(stubsInRangeVA) +
      ", isecEnd = " + utohexstr(isecEnd) +
      ", tail = " + utohexstr(isecEnd - isecVA) +
      ", slop = " + utohexstr(forwardBranchRange - (isecEnd - isecVA)));
  return stubsInRangeVA;
}

void ConcatOutputSection::finalizeOne(ConcatInputSection *isec) {
  size = alignToPowerOf2(size, isec->align);
  fileSize = alignToPowerOf2(fileSize, isec->align);
  isec->outSecOff = size;
  isec->isFinal = true;
  size += isec->getSize();
  fileSize += isec->getFileSize();
}

void ConcatOutputSection::finalizeContents() {
  for (ConcatInputSection *isec : inputs)
    finalizeOne(isec);
}

void TextOutputSection::finalize() {
  if (!needsThunks()) {
    for (ConcatInputSection *isec : inputs)
      finalizeOne(isec);
    return;
  }

  uint64_t forwardBranchRange = target->forwardBranchRange;
  uint64_t backwardBranchRange = target->backwardBranchRange;
  uint64_t stubsInRangeVA = TargetInfo::outOfRangeVA;
  size_t thunkSize = target->thunkSize;
  size_t relocCount = 0;
  size_t callSiteCount = 0;
  size_t thunkCallCount = 0;
  size_t thunkCount = 0;

  // Walk all sections in order. Finalize every section that lies within
  // forwardBranchRange ahead of the current one.
  // isecVA is the address of the current section.
  // addr + size is the start address of the first non-finalized section.

  // inputs[finalIdx] is for finalization (address-assignment)
  size_t finalIdx = 0;
  // Kick off by ensuring that the first input section has an address.
  for (size_t callIdx = 0, endIdx = inputs.size(); callIdx < endIdx;
       ++callIdx) {
    if (finalIdx == callIdx)
      finalizeOne(inputs[finalIdx++]);
    ConcatInputSection *isec = inputs[callIdx];
    assert(isec->isFinal);
    uint64_t isecVA = isec->getVA();

    // Assign addresses up to the forward branch-range limit.
    // Every call instruction needs a small number of bytes (on Arm64: 4),
    // and each inserted thunk needs a slightly larger number of bytes
    // (on Arm64: 12). If a section starts with a branch instruction and
    // contains several branch instructions in succession, then the distance
    // from the current position to the position where the thunks are inserted
    // grows. So leave room for a bunch of thunks.
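    // (On arm64, for instance, that is 256 * 12 bytes = 3 KiB of slack.)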
    unsigned slop = 256 * thunkSize;
    while (finalIdx < endIdx) {
      uint64_t expectedNewSize =
          alignToPowerOf2(addr + size, inputs[finalIdx]->align) +
          inputs[finalIdx]->getSize();
      if (expectedNewSize >= isecVA + forwardBranchRange - slop)
        break;
      finalizeOne(inputs[finalIdx++]);
    }

    if (!isec->hasCallSites)
      continue;

    if (finalIdx == endIdx && stubsInRangeVA == TargetInfo::outOfRangeVA) {
      // When we have finalized all input sections, __stubs (destined
      // to follow __text) comes within range of forward branches and
      // we can estimate the threshold address after which we can
      // reach any stub with a forward branch. Note that although it
      // sits in the middle of a loop, this code executes only once.
      // It is in the loop because we need to call it at the proper
      // time: the earliest call site from which the end of __text
      // (and start of __stubs) comes within range of a forward branch.
      stubsInRangeVA = estimateStubsInRangeVA(callIdx);
    }
    // Process relocs by ascending address, i.e., ascending offset within isec
    std::vector<Reloc> &relocs = isec->relocs;
    // FIXME: This property does not hold for object files produced by ld64's
    // `-r` mode.
    assert(is_sorted(relocs,
                     [](Reloc &a, Reloc &b) { return a.offset > b.offset; }));
    for (Reloc &r : reverse(relocs)) {
      ++relocCount;
      if (!target->hasAttr(r.type, RelocAttrBits::BRANCH))
        continue;
      ++callSiteCount;
      // Calculate branch reachability boundaries
      uint64_t callVA = isecVA + r.offset;
      uint64_t lowVA =
          backwardBranchRange < callVA ? callVA - backwardBranchRange : 0;
      uint64_t highVA = callVA + forwardBranchRange;
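      // (Illustrative: with a +/-128 MiB branch range, a call at VA 0x10000000
      // can reach targets in roughly [0x8000000, 0x18000000] directly; anything
      // outside that window must go through a thunk or an in-range stub.)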
      // Calculate our call referent address
      auto *funcSym = r.referent.get<Symbol *>();
      ThunkInfo &thunkInfo = thunkMap[funcSym];
      // The referent is not reachable, so we need to use a thunk ...
      if (funcSym->isInStubs() && callVA >= stubsInRangeVA) {
        assert(callVA != TargetInfo::outOfRangeVA);
        // ... Oh, wait! We are close enough to the end that __stubs
        // are now within range of a simple forward branch.
        continue;
      }
      uint64_t funcVA = funcSym->resolveBranchVA();
      ++thunkInfo.callSitesUsed;
      if (lowVA <= funcVA && funcVA <= highVA) {
        // The referent is reachable with a simple call instruction.
        continue;
      }
      ++thunkInfo.thunkCallCount;
      ++thunkCallCount;
      // If an existing thunk is reachable, use it ...
      if (thunkInfo.sym) {
        uint64_t thunkVA = thunkInfo.isec->getVA();
        if (lowVA <= thunkVA && thunkVA <= highVA) {
          r.referent = thunkInfo.sym;
          continue;
        }
      }
      // ... otherwise, create a new thunk.
      if (addr + size > highVA) {
        // There were too many consecutive branch instructions for `slop`
        // above. If you hit this: For the current algorithm, just bumping up
        // slop above and trying again is probably simplest. (See also PR51578
        // comment 5).
        fatal(Twine(__FUNCTION__) + ": FIXME: thunk range overrun");
      }
      thunkInfo.isec =
          makeSyntheticInputSection(isec->getSegName(), isec->getName());
      thunkInfo.isec->parent = this;
      assert(thunkInfo.isec->live);

      StringRef thunkName = saver().save(funcSym->getName() + ".thunk." +
                                         std::to_string(thunkInfo.sequence++));
      if (!isa<Defined>(funcSym) || cast<Defined>(funcSym)->isExternal()) {
        r.referent = thunkInfo.sym = symtab->addDefined(
            thunkName, /*file=*/nullptr, thunkInfo.isec, /*value=*/0, thunkSize,
            /*isWeakDef=*/false, /*isPrivateExtern=*/true,
            /*isReferencedDynamically=*/false, /*noDeadStrip=*/false,
            /*isWeakDefCanBeHidden=*/false);
      } else {
        r.referent = thunkInfo.sym = make<Defined>(
            thunkName, /*file=*/nullptr, thunkInfo.isec, /*value=*/0, thunkSize,
            /*isWeakDef=*/false, /*isExternal=*/false, /*isPrivateExtern=*/true,
            /*includeInSymtab=*/true, /*isReferencedDynamically=*/false,
            /*noDeadStrip=*/false, /*isWeakDefCanBeHidden=*/false);
      }
      thunkInfo.sym->used = true;
      target->populateThunk(thunkInfo.isec, funcSym);
      finalizeOne(thunkInfo.isec);
      thunks.push_back(thunkInfo.isec);
      ++thunkCount;
    }
  }

  log("thunks for " + parent->name + "," + name +
      ": funcs = " + std::to_string(thunkMap.size()) +
      ", relocs = " + std::to_string(relocCount) +
      ", all calls = " + std::to_string(callSiteCount) +
      ", thunk calls = " + std::to_string(thunkCallCount) +
      ", thunks = " + std::to_string(thunkCount));
}

void ConcatOutputSection::writeTo(uint8_t *buf) const {
  for (ConcatInputSection *isec : inputs)
    isec->writeTo(buf + isec->outSecOff);
}

void TextOutputSection::writeTo(uint8_t *buf) const {
  // Merge input sections from thunk & ordinary vectors
  size_t i = 0, ie = inputs.size();
  size_t t = 0, te = thunks.size();
  while (i < ie || t < te) {
    while (i < ie && (t == te || inputs[i]->empty() ||
                      inputs[i]->outSecOff < thunks[t]->outSecOff)) {
      inputs[i]->writeTo(buf + inputs[i]->outSecOff);
      ++i;
    }
    while (t < te && (i == ie || thunks[t]->outSecOff < inputs[i]->outSecOff)) {
      thunks[t]->writeTo(buf + thunks[t]->outSecOff);
      ++t;
    }
  }
}

void ConcatOutputSection::finalizeFlags(InputSection *input) {
  switch (sectionType(input->getFlags())) {
  default /*type-unspec'ed*/:
    // FIXME: Add additional logic here when supporting emitting obj files.
    break;
  case S_4BYTE_LITERALS:
  case S_8BYTE_LITERALS:
  case S_16BYTE_LITERALS:
  case S_CSTRING_LITERALS:
  case S_ZEROFILL:
  case S_LAZY_SYMBOL_POINTERS:
  case S_MOD_TERM_FUNC_POINTERS:
  case S_THREAD_LOCAL_REGULAR:
  case S_THREAD_LOCAL_ZEROFILL:
  case S_THREAD_LOCAL_VARIABLES:
  case S_THREAD_LOCAL_INIT_FUNCTION_POINTERS:
  case S_THREAD_LOCAL_VARIABLE_POINTERS:
  case S_NON_LAZY_SYMBOL_POINTERS:
  case S_SYMBOL_STUBS:
    flags |= input->getFlags();
    break;
  }
}

ConcatOutputSection *
ConcatOutputSection::getOrCreateForInput(const InputSection *isec) {
  NamePair names = maybeRenameSection({isec->getSegName(), isec->getName()});
  ConcatOutputSection *&osec = concatOutputSections[names];
  if (!osec) {
    if (isec->getSegName() == segment_names::text &&
        isec->getName() != section_names::gccExceptTab &&
        isec->getName() != section_names::ehFrame)
      osec = make<TextOutputSection>(names.second);
    else
      osec = make<ConcatOutputSection>(names.second);
  }
  return osec;
}

NamePair macho::maybeRenameSection(NamePair key) {
  auto newNames = config->sectionRenameMap.find(key);
  if (newNames != config->sectionRenameMap.end())
    return newNames->second;
  return key;
}