Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/contrib/llvm-project/lld/ELF/InputSection.cpp
34869 views
1
//===- InputSection.cpp ---------------------------------------------------===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
9
#include "InputSection.h"
10
#include "Config.h"
11
#include "InputFiles.h"
12
#include "OutputSections.h"
13
#include "Relocations.h"
14
#include "SymbolTable.h"
15
#include "Symbols.h"
16
#include "SyntheticSections.h"
17
#include "Target.h"
18
#include "lld/Common/CommonLinkerContext.h"
19
#include "llvm/Support/Compiler.h"
20
#include "llvm/Support/Compression.h"
21
#include "llvm/Support/Endian.h"
22
#include "llvm/Support/xxhash.h"
23
#include <algorithm>
24
#include <mutex>
25
#include <optional>
26
#include <vector>
27
28
using namespace llvm;
29
using namespace llvm::ELF;
30
using namespace llvm::object;
31
using namespace llvm::support;
32
using namespace llvm::support::endian;
33
using namespace llvm::sys;
34
using namespace lld;
35
using namespace lld::elf;
36
37
// (symbol, offset) pairs for which PPC64 TOC-indirect to TOC-relative
// relaxation must not be performed.
// NOTE(review): populated and consumed outside this block (PPC64 target
// code) — confirm against Relocations/PPC64 before relying on semantics.
DenseSet<std::pair<const Symbol *, uint64_t>> elf::ppc64noTocRelax;
38
39
// Returns a string to construct an error message. The result has the form
// "file:(section-name)".
std::string lld::toString(const InputSectionBase *sec) {
  // Built as a Twine chain and materialized once with .str().
  return (toString(sec->file) + ":(" + sec->name + ")").str();
}
43
44
template <class ELFT>
45
static ArrayRef<uint8_t> getSectionContents(ObjFile<ELFT> &file,
46
const typename ELFT::Shdr &hdr) {
47
if (hdr.sh_type == SHT_NOBITS)
48
return ArrayRef<uint8_t>(nullptr, hdr.sh_size);
49
return check(file.getObj().getSectionContents(hdr));
50
}
51
52
// Primary constructor: records the owning file and the section's raw bytes,
// validates alignment, and decodes the compression header if present.
InputSectionBase::InputSectionBase(InputFile *file, uint64_t flags,
                                   uint32_t type, uint64_t entsize,
                                   uint32_t link, uint32_t info,
                                   uint32_t addralign, ArrayRef<uint8_t> data,
                                   StringRef name, Kind sectionKind)
    : SectionBase(sectionKind, name, flags, entsize, addralign, type, info,
                  link),
      file(file), content_(data.data()), size(data.size()) {
  // In order to reduce memory allocation, we assume that mergeable
  // sections are smaller than 4 GiB, which is not an unreasonable
  // assumption as of 2017.
  if (sectionKind == SectionBase::Merge && content().size() > UINT32_MAX)
    error(toString(this) + ": section too large");

  // The ELF spec states that a value of 0 means the section has
  // no alignment constraints.
  uint32_t v = std::max<uint32_t>(addralign, 1);
  if (!isPowerOf2_64(v))
    fatal(toString(this) + ": sh_addralign is not a power of 2");
  this->addralign = v;

  // If SHF_COMPRESSED is set, parse the header. The legacy .zdebug format is no
  // longer supported.
  if (flags & SHF_COMPRESSED)
    invokeELFT(parseCompressedHeader,);
}
78
79
// SHF_INFO_LINK and SHF_GROUP are normally resolved and not copied to the
80
// output section. However, for relocatable linking without
81
// --force-group-allocation, the SHF_GROUP flag and section groups are retained.
82
static uint64_t getFlags(uint64_t flags) {
83
flags &= ~(uint64_t)SHF_INFO_LINK;
84
if (config->resolveGroups)
85
flags &= ~(uint64_t)SHF_GROUP;
86
return flags;
87
}
88
89
// Convenience constructor: builds an InputSectionBase straight from an ELF
// section header, pulling flags/links/contents out of `hdr`.
template <class ELFT>
InputSectionBase::InputSectionBase(ObjFile<ELFT> &file,
                                   const typename ELFT::Shdr &hdr,
                                   StringRef name, Kind sectionKind)
    : InputSectionBase(&file, getFlags(hdr.sh_flags), hdr.sh_type,
                       hdr.sh_entsize, hdr.sh_link, hdr.sh_info,
                       hdr.sh_addralign, getSectionContents(file, hdr), name,
                       sectionKind) {
  // We reject object files having insanely large alignments even though
  // they are allowed by the spec. I think 4GB is a reasonable limitation.
  // We might want to relax this in the future.
  if (hdr.sh_addralign > UINT32_MAX)
    fatal(toString(&file) + ": section sh_addralign is too large");
}
103
104
// Current size of this section. Synthetic sections compute their size on
// demand; for everything else, subtract bytes removed by relaxation.
size_t InputSectionBase::getSize() const {
  const auto *synth = dyn_cast<SyntheticSection>(this);
  return synth ? synth->getSize() : size - bytesDropped;
}
109
110
// Decompress `sec`'s SHF_COMPRESSED payload into `out` (`size` bytes).
// The payload is prefixed by an Elf_Chdr whose ch_type selects zlib vs zstd.
template <class ELFT>
static void decompressAux(const InputSectionBase &sec, uint8_t *out,
                          size_t size) {
  auto *hdr = reinterpret_cast<const typename ELFT::Chdr *>(sec.content_);
  // Skip the compression header to reach the compressed bytes.
  auto compressed = ArrayRef<uint8_t>(sec.content_, sec.compressedSize)
                        .slice(sizeof(typename ELFT::Chdr));
  if (Error e = hdr->ch_type == ELFCOMPRESS_ZLIB
                    ? compression::zlib::decompress(compressed, out, size)
                    : compression::zstd::decompress(compressed, out, size))
    fatal(toString(&sec) +
          ": decompress failed: " + llvm::toString(std::move(e)));
}
122
123
// Replace this section's contents with the decompressed bytes and clear the
// `compressed` flag so subsequent reads see plain data.
void InputSectionBase::decompress() const {
  uint8_t *uncompressedBuf;
  {
    // The allocation is guarded by a function-local mutex: bAlloc() is a
    // shared allocator, so the lock serializes concurrent callers for just
    // the Allocate call; decompression itself runs outside the lock.
    static std::mutex mu;
    std::lock_guard<std::mutex> lock(mu);
    uncompressedBuf = bAlloc().Allocate<uint8_t>(size);
  }

  invokeELFT(decompressAux, *this, uncompressedBuf, size);
  content_ = uncompressedBuf;
  compressed = false;
}
135
136
// Return this section's relocations as REL, RELA, or CREL, depending on the
// relocation section's type. For SHT_CREL, either hand back a lazy iterator
// (when the caller supports it) or decode the CREL records into a cached
// RELA buffer so legacy callers can consume a flat array.
template <class ELFT>
RelsOrRelas<ELFT> InputSectionBase::relsOrRelas(bool supportsCrel) const {
  if (relSecIdx == 0)
    return {};
  RelsOrRelas<ELFT> ret;
  auto *f = cast<ObjFile<ELFT>>(file);
  typename ELFT::Shdr shdr = f->template getELFShdrs<ELFT>()[relSecIdx];
  if (shdr.sh_type == SHT_CREL) {
    // Return an iterator if supported by caller.
    if (supportsCrel) {
      ret.crels = Relocs<typename ELFT::Crel>(
          (const uint8_t *)f->mb.getBufferStart() + shdr.sh_offset);
      return ret;
    }
    // Reference (not copy) so the cacheDecodedCrel update below is observed.
    InputSectionBase *const &relSec = f->getSections()[relSecIdx];
    // Otherwise, allocate a buffer to hold the decoded RELA relocations. When
    // called for the first time, relSec is null (without --emit-relocs) or an
    // InputSection with false decodedCrel.
    if (!relSec || !cast<InputSection>(relSec)->decodedCrel) {
      auto *sec = makeThreadLocal<InputSection>(*f, shdr, name);
      f->cacheDecodedCrel(relSecIdx, sec);
      sec->type = SHT_RELA;
      sec->decodedCrel = true;

      // Expand every compact CREL entry into a full Elf_Rela record.
      RelocsCrel<ELFT::Is64Bits> entries(sec->content_);
      sec->size = entries.size() * sizeof(typename ELFT::Rela);
      auto *relas = makeThreadLocalN<typename ELFT::Rela>(entries.size());
      sec->content_ = reinterpret_cast<uint8_t *>(relas);
      for (auto [i, r] : llvm::enumerate(entries)) {
        relas[i].r_offset = r.r_offset;
        relas[i].setSymbolAndType(r.r_symidx, r.r_type, false);
        relas[i].r_addend = r.r_addend;
      }
    }
    ret.relas = {ArrayRef(
        reinterpret_cast<const typename ELFT::Rela *>(relSec->content_),
        relSec->size / sizeof(typename ELFT::Rela))};
    return ret;
  }

  // Plain SHT_REL/SHT_RELA: view the file bytes in place, no copying.
  const void *content = f->mb.getBufferStart() + shdr.sh_offset;
  size_t size = shdr.sh_size;
  if (shdr.sh_type == SHT_REL) {
    ret.rels = {ArrayRef(reinterpret_cast<const typename ELFT::Rel *>(content),
                         size / sizeof(typename ELFT::Rel))};
  } else {
    assert(shdr.sh_type == SHT_RELA);
    ret.relas = {
        ArrayRef(reinterpret_cast<const typename ELFT::Rela *>(content),
                 size / sizeof(typename ELFT::Rela))};
  }
  return ret;
}
189
190
// Translate `offset` (an offset within this section) into an offset within
// the enclosing output section, dispatching on the section kind.
uint64_t SectionBase::getOffset(uint64_t offset) const {
  switch (kind()) {
  case Output: {
    auto *os = cast<OutputSection>(this);
    // For output sections we treat offset -1 as the end of the section.
    return offset == uint64_t(-1) ? os->size : offset;
  }
  case Regular:
  case Synthetic:
  case Spill:
    return cast<InputSection>(this)->outSecOff + offset;
  case EHFrame: {
    // Two code paths may reach here. First, clang_rt.crtbegin.o and GCC
    // crtbeginT.o may reference the start of an empty .eh_frame to identify the
    // start of the output .eh_frame. Just return offset.
    //
    // Second, InputSection::copyRelocations on .eh_frame. Some pieces may be
    // discarded due to GC/ICF. We should compute the output section offset.
    const EhInputSection *es = cast<EhInputSection>(this);
    if (!es->content().empty())
      if (InputSection *isec = es->getParent())
        return isec->outSecOff + es->getParentOffset(offset);
    return offset;
  }
  case Merge:
    // Merged strings: map through the parent's piece table. If there is no
    // parent yet, the parent-relative offset is the best we can do.
    const MergeInputSection *ms = cast<MergeInputSection>(this);
    if (InputSection *isec = ms->getParent())
      return isec->outSecOff + ms->getParentOffset(offset);
    return ms->getParentOffset(offset);
  }
  llvm_unreachable("invalid section kind");
}
222
223
// Virtual address of `offset` within this section: the owning output
// section's base address (zero if the section has no parent yet) plus the
// output-section-relative offset.
uint64_t SectionBase::getVA(uint64_t offset) const {
  uint64_t base = 0;
  if (const OutputSection *osec = getOutputSection())
    base = osec->addr;
  return base + getOffset(offset);
}
227
228
// Find the OutputSection this section will be placed in, or null if it has
// not been assigned one. Merge/EhFrame sections route through their parent
// InputSection; an OutputSection returns itself.
OutputSection *SectionBase::getOutputSection() {
  InputSection *sec;
  if (auto *isec = dyn_cast<InputSection>(this))
    sec = isec;
  else if (auto *ms = dyn_cast<MergeInputSection>(this))
    sec = ms->getParent();
  else if (auto *eh = dyn_cast<EhInputSection>(this))
    sec = eh->getParent();
  else
    return cast<OutputSection>(this);
  // The parent may be null if the section has not been assigned yet.
  return sec ? sec->getParent() : nullptr;
}
240
241
// When a section is compressed, `rawData` consists with a header followed
// by zlib-compressed data. This function parses a header to initialize
// `uncompressedSize` member and remove the header from `rawData`.
template <typename ELFT> void InputSectionBase::parseCompressedHeader() {
  // The flag is consumed here; downstream code keys off `compressed` instead.
  flags &= ~(uint64_t)SHF_COMPRESSED;

  // New-style header
  if (content().size() < sizeof(typename ELFT::Chdr)) {
    error(toString(this) + ": corrupted compressed section");
    return;
  }

  // Validate ch_type and make sure lld was built with the matching codec.
  auto *hdr = reinterpret_cast<const typename ELFT::Chdr *>(content().data());
  if (hdr->ch_type == ELFCOMPRESS_ZLIB) {
    if (!compression::zlib::isAvailable())
      error(toString(this) + " is compressed with ELFCOMPRESS_ZLIB, but lld is "
                             "not built with zlib support");
  } else if (hdr->ch_type == ELFCOMPRESS_ZSTD) {
    if (!compression::zstd::isAvailable())
      error(toString(this) + " is compressed with ELFCOMPRESS_ZSTD, but lld is "
                             "not built with zstd support");
  } else {
    error(toString(this) + ": unsupported compression type (" +
          Twine(hdr->ch_type) + ")");
    return;
  }

  // Switch size/alignment to the uncompressed values; keep the compressed
  // byte count around for the eventual decompress() call.
  compressed = true;
  compressedSize = size;
  size = hdr->ch_size;
  addralign = std::max<uint32_t>(hdr->ch_addralign, 1);
}
273
274
// For an SHF_LINK_ORDER section, return the section named by sh_link that
// drives its ordering, or null when sh_link is 0 (no dependency).
InputSection *InputSectionBase::getLinkOrderDep() const {
  assert(flags & SHF_LINK_ORDER);
  return link ? cast<InputSection>(file->getSections()[link]) : nullptr;
}
280
281
// Find a symbol that encloses a given location.
282
Defined *InputSectionBase::getEnclosingSymbol(uint64_t offset,
283
uint8_t type) const {
284
if (file->isInternal())
285
return nullptr;
286
for (Symbol *b : file->getSymbols())
287
if (Defined *d = dyn_cast<Defined>(b))
288
if (d->section == this && d->value <= offset &&
289
offset < d->value + d->size && (type == 0 || type == d->type))
290
return d;
291
return nullptr;
292
}
293
294
// Returns an object file location string. Used to construct an error message.
// Shapes produced: "out:(sec+0xOFF)" for synthetic sections,
// "file:(function f: sec+0xOFF)" when an enclosing function is known,
// otherwise "file:(sec+0xOFF)".
std::string InputSectionBase::getLocation(uint64_t offset) const {
  std::string secAndOffset =
      (name + "+0x" + Twine::utohexstr(offset) + ")").str();

  // We don't have file for synthetic sections.
  if (file == nullptr)
    return (config->outputFile + ":(" + secAndOffset).str();

  std::string filename = toString(file);
  if (Defined *d = getEnclosingFunction(offset))
    return filename + ":(function " + toString(*d) + ": " + secAndOffset;

  return filename + ":(" + secAndOffset;
}
309
310
// This function is intended to be used for constructing an error message.
// The returned message looks like this:
//
//   foo.c:42 (/home/alice/possibly/very/long/path/foo.c:42)
//
// Returns an empty string if there's no way to get line info.
// Delegates to the owning file, which has access to debug info.
std::string InputSectionBase::getSrcMsg(const Symbol &sym,
                                        uint64_t offset) const {
  return file->getSrcMsg(sym, *this, offset);
}
320
321
// Returns a filename string along with an optional section name. This
// function is intended to be used for constructing an error
// message. The returned message looks like this:
//
//   path/to/foo.o:(function bar)
//
// or
//
//   path/to/foo.o:(function bar) in archive path/to/bar.a
std::string InputSectionBase::getObjMsg(uint64_t off) const {
  std::string filename = std::string(file->getName());

  std::string archive;
  if (!file->archiveName.empty())
    archive = (" in archive " + file->archiveName).str();

  // Find a symbol that encloses a given location. getObjMsg may be called
  // before ObjFile::initSectionsAndLocalSyms where local symbols are
  // initialized.
  if (Defined *d = getEnclosingSymbol(off))
    return filename + ":(" + toString(*d) + ")" + archive;

  // If there's no symbol, print out the offset in the section.
  return (filename + ":(" + name + "+0x" + utohexstr(off) + ")" + archive)
      .str();
}
347
348
// A Spill-kind InputSection that mirrors `source`'s attributes (but carries
// no data) and remembers the InputSectionDescription it may spill into.
PotentialSpillSection::PotentialSpillSection(const InputSectionBase &source,
                                             InputSectionDescription &isd)
    : InputSection(source.file, source.flags, source.type, source.addralign, {},
                   source.name, SectionBase::Spill),
      isd(&isd) {}
353
354
// Sentinel instance: the only InputSection allowed to have a null file (the
// constructor asserts exactly this).
InputSection InputSection::discarded(nullptr, 0, 0, 0, ArrayRef<uint8_t>(), "");
355
356
// Construct a Regular (by default) input section with no entsize/link/info.
// A null file is only legal for the `discarded` sentinel above.
InputSection::InputSection(InputFile *f, uint64_t flags, uint32_t type,
                           uint32_t addralign, ArrayRef<uint8_t> data,
                           StringRef name, Kind k)
    : InputSectionBase(f, flags, type,
                       /*Entsize*/ 0, /*Link*/ 0, /*Info*/ 0, addralign, data,
                       name, k) {
  assert(f || this == &InputSection::discarded);
}
364
365
// Construct a Regular input section straight from an ELF section header.
template <class ELFT>
InputSection::InputSection(ObjFile<ELFT> &f, const typename ELFT::Shdr &header,
                           StringRef name)
    : InputSectionBase(f, header, name, InputSectionBase::Regular) {}
369
370
// Copy SHT_GROUP section contents. Used only for the -r option.
template <class ELFT> void InputSection::copyShtGroup(uint8_t *buf) {
  // ELFT::Word is the 32-bit integral type in the target endianness.
  using u32 = typename ELFT::Word;
  ArrayRef<u32> from = getDataAs<u32>();
  auto *to = reinterpret_cast<u32 *>(buf);

  // The first entry is not a section number but a flag.
  *to++ = from[0];

  // Adjust section numbers because section numbers in an input object files are
  // different in the output. We also need to handle combined or discarded
  // members.
  ArrayRef<InputSectionBase *> sections = file->getSections();
  // `seen` deduplicates: members merged into one output section are emitted
  // once; members with no output section (discarded) are dropped.
  DenseSet<uint32_t> seen;
  for (uint32_t idx : from.slice(1)) {
    OutputSection *osec = sections[idx]->getOutputSection();
    if (osec && seen.insert(osec->sectionIndex).second)
      *to++ = osec->sectionIndex;
  }
}
391
392
// For a static relocation section (SHT_REL/SHT_RELA/...), return the section
// it applies to (named by sh_info); otherwise, or for internal files, null.
InputSectionBase *InputSection::getRelocatedSection() const {
  if (!file->isInternal() && isStaticRelSecType(type))
    return file->getSections()[info];
  return nullptr;
}
398
399
// Entry point for -r/--emit-relocs relocation copying: choose the relocation
// source, then forward to the range-based overload below.
template <class ELFT, class RelTy>
void InputSection::copyRelocations(uint8_t *buf) {
  if (config->relax && !config->relocatable &&
      (config->emachine == EM_RISCV || config->emachine == EM_LOONGARCH)) {
    // On LoongArch and RISC-V, relaxation might change relocations: copy
    // from internal ones that are updated by relaxation.
    InputSectionBase *sec = getRelocatedSection();
    copyRelocations<ELFT, RelTy>(buf, llvm::make_range(sec->relocations.begin(),
                                                       sec->relocations.end()));
  } else {
    // Convert the raw relocations in the input section into Relocation objects
    // suitable to be used by copyRelocations below.
    struct MapRel {
      const ObjFile<ELFT> &file;
      Relocation operator()(const RelTy &rel) const {
        // RelExpr is not used so set to a dummy value.
        return Relocation{R_NONE, rel.getType(config->isMips64EL), rel.r_offset,
                          getAddend<ELFT>(rel), &file.getRelocTargetSym(rel)};
      }
    };

    // Lazily map raw Elf_Rel[a] records to Relocation objects as the worker
    // iterates — no intermediate vector is materialized.
    using RawRels = ArrayRef<RelTy>;
    using MapRelIter =
        llvm::mapped_iterator<typename RawRels::iterator, MapRel>;
    auto mapRel = MapRel{*getFile<ELFT>()};
    RawRels rawRels = getDataAs<RelTy>();
    auto rels = llvm::make_range(MapRelIter(rawRels.begin(), mapRel),
                                 MapRelIter(rawRels.end(), mapRel));
    copyRelocations<ELFT, RelTy>(buf, rels);
  }
}
430
431
// This is used for -r and --emit-relocs. We can't use memcpy to copy
// relocations because we need to update symbol table offset and section index
// for each relocation. So we copy relocations one by one.
// `buf` receives one ELFT::Rela-shaped record per input relocation (the
// r_addend field is only written when RelTy carries addends).
template <class ELFT, class RelTy, class RelIt>
void InputSection::copyRelocations(uint8_t *buf,
                                   llvm::iterator_range<RelIt> rels) {
  const TargetInfo &target = *elf::target;
  InputSectionBase *sec = getRelocatedSection();
  (void)sec->contentMaybeDecompress(); // uncompress if needed

  for (const Relocation &rel : rels) {
    RelType type = rel.type;
    const ObjFile<ELFT> *file = getFile<ELFT>();
    Symbol &sym = *rel.sym;

    // Advance by the *input* record size; p views the output record.
    auto *p = reinterpret_cast<typename ELFT::Rela *>(buf);
    buf += sizeof(RelTy);

    if (RelTy::HasAddend)
      p->r_addend = rel.addend;

    // Output section VA is zero for -r, so r_offset is an offset within the
    // section, but for --emit-relocs it is a virtual address.
    p->r_offset = sec->getVA(rel.offset);
    p->setSymbolAndType(in.symTab->getSymbolIndex(sym), type,
                        config->isMips64EL);

    if (sym.type == STT_SECTION) {
      // We combine multiple section symbols into only one per
      // section. This means we have to update the addend. That is
      // trivial for Elf_Rela, but for Elf_Rel we have to write to the
      // section data. We do that by adding to the Relocation vector.

      // .eh_frame is horribly special and can reference discarded sections. To
      // avoid having to parse and recreate .eh_frame, we just replace any
      // relocation in it pointing to discarded sections with R_*_NONE, which
      // hopefully creates a frame that is ignored at runtime. Also, don't warn
      // on .gcc_except_table and debug sections.
      //
      // See the comment in maybeReportUndefined for PPC32 .got2 and PPC64 .toc
      auto *d = dyn_cast<Defined>(&sym);
      if (!d) {
        // Section symbol of a discarded section: warn (unless the section is
        // one of the tolerated special cases) and emit R_*_NONE.
        if (!isDebugSection(*sec) && sec->name != ".eh_frame" &&
            sec->name != ".gcc_except_table" && sec->name != ".got2" &&
            sec->name != ".toc") {
          uint32_t secIdx = cast<Undefined>(sym).discardedSecIdx;
          Elf_Shdr_Impl<ELFT> sec = file->template getELFShdrs<ELFT>()[secIdx];
          warn("relocation refers to a discarded section: " +
               CHECK(file->getObj().getSectionName(sec), file) +
               "\n>>> referenced by " + getObjMsg(p->r_offset));
        }
        p->setSymbolAndType(0, 0, false);
        continue;
      }
      SectionBase *section = d->section;
      assert(section->isLive());

      int64_t addend = rel.addend;
      const uint8_t *bufLoc = sec->content().begin() + rel.offset;
      if (!RelTy::HasAddend)
        addend = target.getImplicitAddend(bufLoc, type);

      if (config->emachine == EM_MIPS &&
          target.getRelExpr(type, sym, bufLoc) == R_MIPS_GOTREL) {
        // Some MIPS relocations depend on "gp" value. By default,
        // this value has 0x7ff0 offset from a .got section. But
        // relocatable files produced by a compiler or a linker
        // might redefine this default value and we must use it
        // for a calculation of the relocation result. When we
        // generate EXE or DSO it's trivial. Generating a relocatable
        // output is more difficult case because the linker does
        // not calculate relocations in this mode and loses
        // individual "gp" values used by each input object file.
        // As a workaround we add the "gp" value to the relocation
        // addend and save it back to the file.
        addend += sec->getFile<ELFT>()->mipsGp0;
      }

      if (RelTy::HasAddend)
        p->r_addend = sym.getVA(addend) - section->getOutputSection()->addr;
      // For SHF_ALLOC sections relocated by REL, append a relocation to
      // sec->relocations so that relocateAlloc transitively called by
      // writeSections will update the implicit addend. Non-SHF_ALLOC sections
      // utilize relocateNonAlloc to process raw relocations and do not need
      // this sec->relocations change.
      else if (config->relocatable && (sec->flags & SHF_ALLOC) &&
               type != target.noneRel)
        sec->addReloc({R_ABS, type, rel.offset, addend, &sym});
    } else if (config->emachine == EM_PPC && type == R_PPC_PLTREL24 &&
               p->r_addend >= 0x8000 && sec->file->ppc32Got2) {
      // Similar to R_MIPS_GPREL{16,32}. If the addend of R_PPC_PLTREL24
      // indicates that r30 is relative to the input section .got2
      // (r_addend>=0x8000), after linking, r30 should be relative to the output
      // section .got2 . To compensate for the shift, adjust r_addend by
      // ppc32Got->outSecOff.
      p->r_addend += sec->file->ppc32Got2->outSecOff;
    }
  }
}
530
531
// The ARM and AArch64 ABI handle pc-relative relocations to undefined weak
// references specially. The general rule is that the value of the symbol in
// this context is the address of the place P. A further special case is that
// branch relocations to an undefined weak reference resolve to the next
// instruction.
static uint32_t getARMUndefinedRelativeWeakVA(RelType type, uint32_t a,
                                              uint32_t p) {
  switch (type) {
  // Unresolved branch relocations to weak references resolve to next
  // instruction, this will be either 2 or 4 bytes on from P.
  case R_ARM_THM_JUMP8:
  case R_ARM_THM_JUMP11:
    return p + 2 + a;
  case R_ARM_CALL:
  case R_ARM_JUMP24:
  case R_ARM_PC24:
  case R_ARM_PLT32:
  case R_ARM_PREL31:
  case R_ARM_THM_JUMP19:
  case R_ARM_THM_JUMP24:
    return p + 4 + a;
  case R_ARM_THM_CALL:
    // We don't want an interworking BLX to ARM
    // (p + 4 with bit 0 set — presumably to stay in Thumb state; see the ABI
    // docs to confirm).
    return p + 5 + a;
  // Unresolved non branch pc-relative relocations
  // R_ARM_TARGET2 which can be resolved relatively is not present as it never
  // targets a weak-reference.
  case R_ARM_MOVW_PREL_NC:
  case R_ARM_MOVT_PREL:
  case R_ARM_REL32:
  case R_ARM_THM_ALU_PREL_11_0:
  case R_ARM_THM_MOVW_PREL_NC:
  case R_ARM_THM_MOVT_PREL:
  case R_ARM_THM_PC12:
    return p + a;
  // p + a is unrepresentable as negative immediates can't be encoded.
  case R_ARM_THM_PC8:
    return p;
  }
  llvm_unreachable("ARM pc-relative relocation expected\n");
}
572
573
// The comment above getARMUndefinedRelativeWeakVA applies to this function.
static uint64_t getAArch64UndefinedRelativeWeakVA(uint64_t type, uint64_t p) {
  switch (type) {
  // Unresolved branch relocations to weak references resolve to next
  // instruction, this is 4 bytes on from P.
  case R_AARCH64_CALL26:
  case R_AARCH64_CONDBR19:
  case R_AARCH64_JUMP26:
  case R_AARCH64_TSTBR14:
    return p + 4;
  // Unresolved non branch pc-relative relocations
  case R_AARCH64_PREL16:
  case R_AARCH64_PREL32:
  case R_AARCH64_PREL64:
  case R_AARCH64_ADR_PREL_LO21:
  case R_AARCH64_LD_PREL_LO19:
  case R_AARCH64_PLT32:
    return p;
  }
  llvm_unreachable("AArch64 pc-relative relocation expected\n");
}
594
595
// RISC-V counterpart of getARMUndefinedRelativeWeakVA: branch/call
// relocations to an undefined weak resolve to the place itself (yielding a
// self-branch rather than an overflow); everything else resolves to 0.
static uint64_t getRISCVUndefinedRelativeWeakVA(uint64_t type, uint64_t p) {
  bool isBranch =
      type == R_RISCV_BRANCH || type == R_RISCV_JAL || type == R_RISCV_CALL ||
      type == R_RISCV_CALL_PLT || type == R_RISCV_RVC_BRANCH ||
      type == R_RISCV_RVC_JUMP || type == R_RISCV_PLT32;
  return isBranch ? p : 0;
}
609
610
// ARM SBREL relocations are of the form S + A - B where B is the static base
// The ARM ABI defines base to be "addressing origin of the output segment
// defining the symbol S". We defined the "addressing origin"/static base to be
// the base of the PT_LOAD segment containing the Sym.
// The procedure call standard only defines a Read Write Position Independent
// RWPI variant so in practice we should expect the static base to be the base
// of the RW segment.
static uint64_t getARMStaticBase(const Symbol &sym) {
  OutputSection *os = sym.getOutputSection();
  // A symbol without a PT_LOAD-backed output section has no static base.
  if (!os || !os->ptLoad || !os->ptLoad->firstSec)
    fatal("SBREL relocation to " + sym.getName() + " without static base");
  return os->ptLoad->firstSec->addr;
}
623
624
// For R_RISCV_PC_INDIRECT (R_RISCV_PCREL_LO12_{I,S}), the symbol actually
// points the corresponding R_RISCV_PCREL_HI20 relocation, and the target VA
// is calculated using PCREL_HI20's symbol.
//
// This function returns the R_RISCV_PCREL_HI20 relocation from
// R_RISCV_PCREL_LO12's symbol and addend. Returns null (after diagnosing)
// when the symbol is absolute or no HI20-class relocation exists at the
// symbol's offset.
static Relocation *getRISCVPCRelHi20(const Symbol *sym, uint64_t addend) {
  const Defined *d = cast<Defined>(sym);
  if (!d->section) {
    errorOrWarn("R_RISCV_PCREL_LO12 relocation points to an absolute symbol: " +
                sym->getName());
    return nullptr;
  }
  InputSection *isec = cast<InputSection>(d->section);

  if (addend != 0)
    warn("non-zero addend in R_RISCV_PCREL_LO12 relocation to " +
         isec->getObjMsg(d->value) + " is ignored");

  // Relocations are sorted by offset, so we can use std::equal_range to do
  // binary search.
  Relocation r;
  r.offset = d->value;
  auto range =
      std::equal_range(isec->relocs().begin(), isec->relocs().end(), r,
                       [](const Relocation &lhs, const Relocation &rhs) {
                         return lhs.offset < rhs.offset;
                       });

  // Several relocations may share the offset; accept any HI20-class one.
  for (auto it = range.first; it != range.second; ++it)
    if (it->type == R_RISCV_PCREL_HI20 || it->type == R_RISCV_GOT_HI20 ||
        it->type == R_RISCV_TLS_GD_HI20 || it->type == R_RISCV_TLS_GOT_HI20)
      return &*it;

  errorOrWarn("R_RISCV_PCREL_LO12 relocation points to " +
              isec->getObjMsg(d->value) +
              " without an associated R_RISCV_PCREL_HI20 relocation");
  return nullptr;
}
663
664
// A TLS symbol's virtual address is relative to the TLS segment. Add a
// target-specific adjustment to produce a thread-pointer-relative offset.
static int64_t getTlsTpOffset(const Symbol &s) {
  // On targets that support TLSDESC, _TLS_MODULE_BASE_@tpoff = 0.
  if (&s == ElfSym::tlsModuleBase)
    return 0;

  // There are 2 TLS layouts. Among targets we support, x86 uses TLS Variant 2
  // while most others use Variant 1. At run time TP will be aligned to p_align.

  // Variant 1. TP will be followed by an optional gap (which is the size of 2
  // pointers on ARM/AArch64, 0 on other targets), followed by alignment
  // padding, then the static TLS blocks. The alignment padding is added so that
  // (TP + gap + padding) is congruent to p_vaddr modulo p_align.
  //
  // Variant 2. Static TLS blocks, followed by alignment padding are placed
  // before TP. The alignment padding is added so that (TP - padding -
  // p_memsz) is congruent to p_vaddr modulo p_align.
  PhdrEntry *tls = Out::tlsPhdr;
  switch (config->emachine) {
    // Variant 1.
  case EM_ARM:
  case EM_AARCH64:
    // Two-pointer TCB gap, then pad so the block lands congruent to p_vaddr.
    return s.getVA(0) + config->wordsize * 2 +
           ((tls->p_vaddr - config->wordsize * 2) & (tls->p_align - 1));
  case EM_MIPS:
  case EM_PPC:
  case EM_PPC64:
    // Adjusted Variant 1. TP is placed with a displacement of 0x7000, which is
    // to allow a signed 16-bit offset to reach 0x1000 of TCB/thread-library
    // data and 0xf000 of the program's TLS segment.
    return s.getVA(0) + (tls->p_vaddr & (tls->p_align - 1)) - 0x7000;
  case EM_LOONGARCH:
  case EM_RISCV:
    // See the comment in handleTlsRelocation. For TLSDESC=>IE,
    // R_RISCV_TLSDESC_{LOAD_LO12,ADD_LO12_I,CALL} also reach here. While
    // `tls` may be null, the return value is ignored.
    if (s.type != STT_TLS)
      return 0;
    return s.getVA(0) + (tls->p_vaddr & (tls->p_align - 1));

    // Variant 2.
  case EM_HEXAGON:
  case EM_S390:
  case EM_SPARCV9:
  case EM_386:
  case EM_X86_64:
    // Blocks sit below TP; subtract the segment size plus alignment padding.
    return s.getVA(0) - tls->p_memsz -
           ((-tls->p_vaddr - tls->p_memsz) & (tls->p_align - 1));
  default:
    llvm_unreachable("unhandled Config->EMachine");
  }
}
717
718
uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type,
719
int64_t a, uint64_t p,
720
const Symbol &sym, RelExpr expr) {
721
switch (expr) {
722
case R_ABS:
723
case R_DTPREL:
724
case R_RELAX_TLS_LD_TO_LE_ABS:
725
case R_RELAX_GOT_PC_NOPIC:
726
case R_AARCH64_AUTH:
727
case R_RISCV_ADD:
728
case R_RISCV_LEB128:
729
return sym.getVA(a);
730
case R_ADDEND:
731
return a;
732
case R_RELAX_HINT:
733
return 0;
734
case R_ARM_SBREL:
735
return sym.getVA(a) - getARMStaticBase(sym);
736
case R_GOT:
737
case R_RELAX_TLS_GD_TO_IE_ABS:
738
return sym.getGotVA() + a;
739
case R_LOONGARCH_GOT:
740
// The LoongArch TLS GD relocs reuse the R_LARCH_GOT_PC_LO12 reloc type
741
// for their page offsets. The arithmetics are different in the TLS case
742
// so we have to duplicate some logic here.
743
if (sym.hasFlag(NEEDS_TLSGD) && type != R_LARCH_TLS_IE_PC_LO12)
744
// Like R_LOONGARCH_TLSGD_PAGE_PC but taking the absolute value.
745
return in.got->getGlobalDynAddr(sym) + a;
746
return getRelocTargetVA(file, type, a, p, sym, R_GOT);
747
case R_GOTONLY_PC:
748
return in.got->getVA() + a - p;
749
case R_GOTPLTONLY_PC:
750
return in.gotPlt->getVA() + a - p;
751
case R_GOTREL:
752
case R_PPC64_RELAX_TOC:
753
return sym.getVA(a) - in.got->getVA();
754
case R_GOTPLTREL:
755
return sym.getVA(a) - in.gotPlt->getVA();
756
case R_GOTPLT:
757
case R_RELAX_TLS_GD_TO_IE_GOTPLT:
758
return sym.getGotVA() + a - in.gotPlt->getVA();
759
case R_TLSLD_GOT_OFF:
760
case R_GOT_OFF:
761
case R_RELAX_TLS_GD_TO_IE_GOT_OFF:
762
return sym.getGotOffset() + a;
763
case R_AARCH64_GOT_PAGE_PC:
764
case R_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC:
765
return getAArch64Page(sym.getGotVA() + a) - getAArch64Page(p);
766
case R_AARCH64_GOT_PAGE:
767
return sym.getGotVA() + a - getAArch64Page(in.got->getVA());
768
case R_GOT_PC:
769
case R_RELAX_TLS_GD_TO_IE:
770
return sym.getGotVA() + a - p;
771
case R_GOTPLT_GOTREL:
772
return sym.getGotPltVA() + a - in.got->getVA();
773
case R_GOTPLT_PC:
774
return sym.getGotPltVA() + a - p;
775
case R_LOONGARCH_GOT_PAGE_PC:
776
if (sym.hasFlag(NEEDS_TLSGD))
777
return getLoongArchPageDelta(in.got->getGlobalDynAddr(sym) + a, p, type);
778
return getLoongArchPageDelta(sym.getGotVA() + a, p, type);
779
case R_MIPS_GOTREL:
780
return sym.getVA(a) - in.mipsGot->getGp(file);
781
case R_MIPS_GOT_GP:
782
return in.mipsGot->getGp(file) + a;
783
case R_MIPS_GOT_GP_PC: {
784
// R_MIPS_LO16 expression has R_MIPS_GOT_GP_PC type iif the target
785
// is _gp_disp symbol. In that case we should use the following
786
// formula for calculation "AHL + GP - P + 4". For details see p. 4-19 at
787
// ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
788
// microMIPS variants of these relocations use slightly different
789
// expressions: AHL + GP - P + 3 for %lo() and AHL + GP - P - 1 for %hi()
790
// to correctly handle less-significant bit of the microMIPS symbol.
791
uint64_t v = in.mipsGot->getGp(file) + a - p;
792
if (type == R_MIPS_LO16 || type == R_MICROMIPS_LO16)
793
v += 4;
794
if (type == R_MICROMIPS_LO16 || type == R_MICROMIPS_HI16)
795
v -= 1;
796
return v;
797
}
798
case R_MIPS_GOT_LOCAL_PAGE:
799
// If relocation against MIPS local symbol requires GOT entry, this entry
800
// should be initialized by 'page address'. This address is high 16-bits
801
// of sum the symbol's value and the addend.
802
return in.mipsGot->getVA() + in.mipsGot->getPageEntryOffset(file, sym, a) -
803
in.mipsGot->getGp(file);
804
case R_MIPS_GOT_OFF:
805
case R_MIPS_GOT_OFF32:
806
// In case of MIPS if a GOT relocation has non-zero addend this addend
807
// should be applied to the GOT entry content not to the GOT entry offset.
808
// That is why we use separate expression type.
809
return in.mipsGot->getVA() + in.mipsGot->getSymEntryOffset(file, sym, a) -
810
in.mipsGot->getGp(file);
811
case R_MIPS_TLSGD:
812
return in.mipsGot->getVA() + in.mipsGot->getGlobalDynOffset(file, sym) -
813
in.mipsGot->getGp(file);
814
case R_MIPS_TLSLD:
815
return in.mipsGot->getVA() + in.mipsGot->getTlsIndexOffset(file) -
816
in.mipsGot->getGp(file);
817
case R_AARCH64_PAGE_PC: {
818
uint64_t val = sym.isUndefWeak() ? p + a : sym.getVA(a);
819
return getAArch64Page(val) - getAArch64Page(p);
820
}
821
case R_RISCV_PC_INDIRECT: {
822
if (const Relocation *hiRel = getRISCVPCRelHi20(&sym, a))
823
return getRelocTargetVA(file, hiRel->type, hiRel->addend, sym.getVA(),
824
*hiRel->sym, hiRel->expr);
825
return 0;
826
}
827
case R_LOONGARCH_PAGE_PC:
828
return getLoongArchPageDelta(sym.getVA(a), p, type);
829
case R_PC:
830
case R_ARM_PCA: {
831
uint64_t dest;
832
if (expr == R_ARM_PCA)
833
// Some PC relative ARM (Thumb) relocations align down the place.
834
p = p & 0xfffffffc;
835
if (sym.isUndefined()) {
836
// On ARM and AArch64 a branch to an undefined weak resolves to the next
837
// instruction, otherwise the place. On RISC-V, resolve an undefined weak
838
// to the same instruction to cause an infinite loop (making the user
839
// aware of the issue) while ensuring no overflow.
840
// Note: if the symbol is hidden, its binding has been converted to local,
841
// so we just check isUndefined() here.
842
if (config->emachine == EM_ARM)
843
dest = getARMUndefinedRelativeWeakVA(type, a, p);
844
else if (config->emachine == EM_AARCH64)
845
dest = getAArch64UndefinedRelativeWeakVA(type, p) + a;
846
else if (config->emachine == EM_PPC)
847
dest = p;
848
else if (config->emachine == EM_RISCV)
849
dest = getRISCVUndefinedRelativeWeakVA(type, p) + a;
850
else
851
dest = sym.getVA(a);
852
} else {
853
dest = sym.getVA(a);
854
}
855
return dest - p;
856
}
857
case R_PLT:
858
return sym.getPltVA() + a;
859
case R_PLT_PC:
860
case R_PPC64_CALL_PLT:
861
return sym.getPltVA() + a - p;
862
case R_LOONGARCH_PLT_PAGE_PC:
863
return getLoongArchPageDelta(sym.getPltVA() + a, p, type);
864
case R_PLT_GOTPLT:
865
return sym.getPltVA() + a - in.gotPlt->getVA();
866
case R_PLT_GOTREL:
867
return sym.getPltVA() + a - in.got->getVA();
868
case R_PPC32_PLTREL:
869
// R_PPC_PLTREL24 uses the addend (usually 0 or 0x8000) to indicate r30
870
// stores _GLOBAL_OFFSET_TABLE_ or .got2+0x8000. The addend is ignored for
871
// target VA computation.
872
return sym.getPltVA() - p;
873
case R_PPC64_CALL: {
874
uint64_t symVA = sym.getVA(a);
875
// If we have an undefined weak symbol, we might get here with a symbol
876
// address of zero. That could overflow, but the code must be unreachable,
877
// so don't bother doing anything at all.
878
if (!symVA)
879
return 0;
880
881
// PPC64 V2 ABI describes two entry points to a function. The global entry
882
// point is used for calls where the caller and callee (may) have different
883
// TOC base pointers and r2 needs to be modified to hold the TOC base for
884
// the callee. For local calls the caller and callee share the same
885
// TOC base and so the TOC pointer initialization code should be skipped by
886
// branching to the local entry point.
887
return symVA - p + getPPC64GlobalEntryToLocalEntryOffset(sym.stOther);
888
}
889
case R_PPC64_TOCBASE:
890
return getPPC64TocBase() + a;
891
case R_RELAX_GOT_PC:
892
case R_PPC64_RELAX_GOT_PC:
893
return sym.getVA(a) - p;
894
case R_RELAX_TLS_GD_TO_LE:
895
case R_RELAX_TLS_IE_TO_LE:
896
case R_RELAX_TLS_LD_TO_LE:
897
case R_TPREL:
898
// It is not very clear what to return if the symbol is undefined. With
899
// --noinhibit-exec, even a non-weak undefined reference may reach here.
900
// Just return A, which matches R_ABS, and the behavior of some dynamic
901
// loaders.
902
if (sym.isUndefined())
903
return a;
904
return getTlsTpOffset(sym) + a;
905
case R_RELAX_TLS_GD_TO_LE_NEG:
906
case R_TPREL_NEG:
907
if (sym.isUndefined())
908
return a;
909
return -getTlsTpOffset(sym) + a;
910
case R_SIZE:
911
return sym.getSize() + a;
912
case R_TLSDESC:
913
return in.got->getTlsDescAddr(sym) + a;
914
case R_TLSDESC_PC:
915
return in.got->getTlsDescAddr(sym) + a - p;
916
case R_TLSDESC_GOTPLT:
917
return in.got->getTlsDescAddr(sym) + a - in.gotPlt->getVA();
918
case R_AARCH64_TLSDESC_PAGE:
919
return getAArch64Page(in.got->getTlsDescAddr(sym) + a) - getAArch64Page(p);
920
case R_LOONGARCH_TLSDESC_PAGE_PC:
921
return getLoongArchPageDelta(in.got->getTlsDescAddr(sym) + a, p, type);
922
case R_TLSGD_GOT:
923
return in.got->getGlobalDynOffset(sym) + a;
924
case R_TLSGD_GOTPLT:
925
return in.got->getGlobalDynAddr(sym) + a - in.gotPlt->getVA();
926
case R_TLSGD_PC:
927
return in.got->getGlobalDynAddr(sym) + a - p;
928
case R_LOONGARCH_TLSGD_PAGE_PC:
929
return getLoongArchPageDelta(in.got->getGlobalDynAddr(sym) + a, p, type);
930
case R_TLSLD_GOTPLT:
931
return in.got->getVA() + in.got->getTlsIndexOff() + a - in.gotPlt->getVA();
932
case R_TLSLD_GOT:
933
return in.got->getTlsIndexOff() + a;
934
case R_TLSLD_PC:
935
return in.got->getTlsIndexVA() + a - p;
936
default:
937
llvm_unreachable("invalid expression");
938
}
939
}
940
941
// This function applies relocations to sections without SHF_ALLOC bit.
942
// Such sections are never mapped to memory at runtime. Debug sections are
943
// an example. Relocations in non-alloc sections are much easier to
944
// handle than in allocated sections because it will never need complex
945
// treatment such as GOT or PLT (because at runtime no one refers them).
946
// So, we handle relocations for non-alloc sections directly in this
947
// function as a performance optimization.
948
template <class ELFT, class RelTy>
949
void InputSection::relocateNonAlloc(uint8_t *buf, Relocs<RelTy> rels) {
950
const unsigned bits = sizeof(typename ELFT::uint) * 8;
951
const TargetInfo &target = *elf::target;
952
const auto emachine = config->emachine;
953
const bool isDebug = isDebugSection(*this);
954
const bool isDebugLine = isDebug && name == ".debug_line";
955
std::optional<uint64_t> tombstone;
956
if (isDebug) {
957
if (name == ".debug_loc" || name == ".debug_ranges")
958
tombstone = 1;
959
else if (name == ".debug_names")
960
tombstone = UINT64_MAX; // tombstone value
961
else
962
tombstone = 0;
963
}
964
for (const auto &patAndValue : llvm::reverse(config->deadRelocInNonAlloc))
965
if (patAndValue.first.match(this->name)) {
966
tombstone = patAndValue.second;
967
break;
968
}
969
970
const InputFile *f = this->file;
971
for (auto it = rels.begin(), end = rels.end(); it != end; ++it) {
972
const RelTy &rel = *it;
973
const RelType type = rel.getType(config->isMips64EL);
974
const uint64_t offset = rel.r_offset;
975
uint8_t *bufLoc = buf + offset;
976
int64_t addend = getAddend<ELFT>(rel);
977
if (!RelTy::HasAddend)
978
addend += target.getImplicitAddend(bufLoc, type);
979
980
Symbol &sym = f->getRelocTargetSym(rel);
981
RelExpr expr = target.getRelExpr(type, sym, bufLoc);
982
if (expr == R_NONE)
983
continue;
984
auto *ds = dyn_cast<Defined>(&sym);
985
986
if (emachine == EM_RISCV && type == R_RISCV_SET_ULEB128) {
987
if (++it != end &&
988
it->getType(/*isMips64EL=*/false) == R_RISCV_SUB_ULEB128 &&
989
it->r_offset == offset) {
990
uint64_t val;
991
if (!ds && tombstone) {
992
val = *tombstone;
993
} else {
994
val = sym.getVA(addend) -
995
(f->getRelocTargetSym(*it).getVA(0) + getAddend<ELFT>(*it));
996
}
997
if (overwriteULEB128(bufLoc, val) >= 0x80)
998
errorOrWarn(getLocation(offset) + ": ULEB128 value " + Twine(val) +
999
" exceeds available space; references '" +
1000
lld::toString(sym) + "'");
1001
continue;
1002
}
1003
errorOrWarn(getLocation(offset) +
1004
": R_RISCV_SET_ULEB128 not paired with R_RISCV_SUB_SET128");
1005
return;
1006
}
1007
1008
if (tombstone && (expr == R_ABS || expr == R_DTPREL)) {
1009
// Resolve relocations in .debug_* referencing (discarded symbols or ICF
1010
// folded section symbols) to a tombstone value. Resolving to addend is
1011
// unsatisfactory because the result address range may collide with a
1012
// valid range of low address, or leave multiple CUs claiming ownership of
1013
// the same range of code, which may confuse consumers.
1014
//
1015
// To address the problems, we use -1 as a tombstone value for most
1016
// .debug_* sections. We have to ignore the addend because we don't want
1017
// to resolve an address attribute (which may have a non-zero addend) to
1018
// -1+addend (wrap around to a low address).
1019
//
1020
// R_DTPREL type relocations represent an offset into the dynamic thread
1021
// vector. The computed value is st_value plus a non-negative offset.
1022
// Negative values are invalid, so -1 can be used as the tombstone value.
1023
//
1024
// If the referenced symbol is relative to a discarded section (due to
1025
// --gc-sections, COMDAT, etc), it has been converted to a Undefined.
1026
// `ds->folded` catches the ICF folded case. However, resolving a
1027
// relocation in .debug_line to -1 would stop debugger users from setting
1028
// breakpoints on the folded-in function, so exclude .debug_line.
1029
//
1030
// For pre-DWARF-v5 .debug_loc and .debug_ranges, -1 is a reserved value
1031
// (base address selection entry), use 1 (which is used by GNU ld for
1032
// .debug_ranges).
1033
//
1034
// TODO To reduce disruption, we use 0 instead of -1 as the tombstone
1035
// value. Enable -1 in a future release.
1036
if (!ds || (ds->folded && !isDebugLine)) {
1037
// If -z dead-reloc-in-nonalloc= is specified, respect it.
1038
uint64_t value = SignExtend64<bits>(*tombstone);
1039
// For a 32-bit local TU reference in .debug_names, X86_64::relocate
1040
// requires that the unsigned value for R_X86_64_32 is truncated to
1041
// 32-bit. Other 64-bit targets's don't discern signed/unsigned 32-bit
1042
// absolute relocations and do not need this change.
1043
if (emachine == EM_X86_64 && type == R_X86_64_32)
1044
value = static_cast<uint32_t>(value);
1045
target.relocateNoSym(bufLoc, type, value);
1046
continue;
1047
}
1048
}
1049
1050
// For a relocatable link, content relocated by relocation types with an
1051
// explicit addend, such as RELA, remain unchanged and we can stop here.
1052
// While content relocated by relocation types with an implicit addend, such
1053
// as REL, needs the implicit addend updated.
1054
if (config->relocatable && (RelTy::HasAddend || sym.type != STT_SECTION))
1055
continue;
1056
1057
// R_ABS/R_DTPREL and some other relocations can be used from non-SHF_ALLOC
1058
// sections.
1059
if (LLVM_LIKELY(expr == R_ABS) || expr == R_DTPREL || expr == R_GOTPLTREL ||
1060
expr == R_RISCV_ADD) {
1061
target.relocateNoSym(bufLoc, type, SignExtend64<bits>(sym.getVA(addend)));
1062
continue;
1063
}
1064
1065
if (expr == R_SIZE) {
1066
target.relocateNoSym(bufLoc, type,
1067
SignExtend64<bits>(sym.getSize() + addend));
1068
continue;
1069
}
1070
1071
std::string msg = getLocation(offset) + ": has non-ABS relocation " +
1072
toString(type) + " against symbol '" + toString(sym) +
1073
"'";
1074
if (expr != R_PC && !(emachine == EM_386 && type == R_386_GOTPC)) {
1075
errorOrWarn(msg);
1076
return;
1077
}
1078
1079
// If the control reaches here, we found a PC-relative relocation in a
1080
// non-ALLOC section. Since non-ALLOC section is not loaded into memory
1081
// at runtime, the notion of PC-relative doesn't make sense here. So,
1082
// this is a usage error. However, GNU linkers historically accept such
1083
// relocations without any errors and relocate them as if they were at
1084
// address 0. For bug-compatibility, we accept them with warnings. We
1085
// know Steel Bank Common Lisp as of 2018 have this bug.
1086
//
1087
// GCC 8.0 or earlier have a bug that they emit R_386_GOTPC relocations
1088
// against _GLOBAL_OFFSET_TABLE_ for .debug_info. The bug has been fixed in
1089
// 2017 (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82630), but we need to
1090
// keep this bug-compatible code for a while.
1091
warn(msg);
1092
target.relocateNoSym(
1093
bufLoc, type,
1094
SignExtend64<bits>(sym.getVA(addend - offset - outSecOff)));
1095
}
1096
}
1097
1098
// Applies this section's relocations into the output buffer [buf, bufEnd).
// Dispatches to the alloc or non-alloc path; split-stack prologue adjustment
// runs first because it may rewrite relocation targets (see
// switchMorestackCallsToMorestackNonSplit, which reassigns rel.sym).
template <class ELFT>
void InputSectionBase::relocate(uint8_t *buf, uint8_t *bufEnd) {
  if ((flags & SHF_EXECINSTR) && LLVM_UNLIKELY(getFile<ELFT>()->splitStack))
    adjustSplitStackFunctionPrologues<ELFT>(buf, bufEnd);

  // SHF_ALLOC sections are relocated by the target with the precomputed
  // Relocation list.
  if (flags & SHF_ALLOC) {
    target->relocateAlloc(*this, buf);
    return;
  }

  auto *sec = cast<InputSection>(this);
  // For a relocatable link, also call relocateNonAlloc() to rewrite applicable
  // locations with tombstone values.
  invokeOnRelocs(*sec, sec->relocateNonAlloc<ELFT>, buf);
}
1113
1114
// For each function-defining prologue, find any calls to __morestack,
1115
// and replace them with calls to __morestack_non_split.
1116
static void switchMorestackCallsToMorestackNonSplit(
1117
DenseSet<Defined *> &prologues,
1118
SmallVector<Relocation *, 0> &morestackCalls) {
1119
1120
// If the target adjusted a function's prologue, all calls to
1121
// __morestack inside that function should be switched to
1122
// __morestack_non_split.
1123
Symbol *moreStackNonSplit = symtab.find("__morestack_non_split");
1124
if (!moreStackNonSplit) {
1125
error("mixing split-stack objects requires a definition of "
1126
"__morestack_non_split");
1127
return;
1128
}
1129
1130
// Sort both collections to compare addresses efficiently.
1131
llvm::sort(morestackCalls, [](const Relocation *l, const Relocation *r) {
1132
return l->offset < r->offset;
1133
});
1134
std::vector<Defined *> functions(prologues.begin(), prologues.end());
1135
llvm::sort(functions, [](const Defined *l, const Defined *r) {
1136
return l->value < r->value;
1137
});
1138
1139
auto it = morestackCalls.begin();
1140
for (Defined *f : functions) {
1141
// Find the first call to __morestack within the function.
1142
while (it != morestackCalls.end() && (*it)->offset < f->value)
1143
++it;
1144
// Adjust all calls inside the function.
1145
while (it != morestackCalls.end() && (*it)->offset < f->value + f->size) {
1146
(*it)->sym = moreStackNonSplit;
1147
++it;
1148
}
1149
}
1150
}
1151
1152
static bool enclosingPrologueAttempted(uint64_t offset,
1153
const DenseSet<Defined *> &prologues) {
1154
for (Defined *f : prologues)
1155
if (f->value <= offset && offset < f->value + f->size)
1156
return true;
1157
return false;
1158
}
1159
1160
// If a function compiled for split stack calls a function not
// compiled for split stack, then the caller needs its prologue
// adjusted to ensure that the called function will have enough stack
// available. Find those functions, and adjust their prologues.
template <class ELFT>
void InputSectionBase::adjustSplitStackFunctionPrologues(uint8_t *buf,
                                                         uint8_t *end) {
  // Functions whose prologue we already attempted to adjust.
  DenseSet<Defined *> prologues;
  // Relocations that call __morestack; patched afterwards if needed.
  SmallVector<Relocation *, 0> morestackCalls;

  for (Relocation &rel : relocs()) {
    // Ignore calls into the split-stack api.
    if (rel.sym->getName().starts_with("__morestack")) {
      if (rel.sym->getName() == "__morestack")
        morestackCalls.push_back(&rel);
      continue;
    }

    // A relocation to non-function isn't relevant. Sometimes
    // __morestack is not marked as a function, so this check comes
    // after the name check.
    if (rel.sym->type != STT_FUNC)
      continue;

    // If the callee's-file was compiled with split stack, nothing to do. In
    // this context, a "Defined" symbol is one "defined by the binary currently
    // being produced". So an "undefined" symbol might be provided by a shared
    // library. It is not possible to tell how such symbols were compiled, so be
    // conservative.
    // NOTE(review): inside the inner if, isec is necessarily non-null, so the
    // `!isec` clause appears redundant — confirm before simplifying.
    if (Defined *d = dyn_cast<Defined>(rel.sym))
      if (InputSection *isec = cast_or_null<InputSection>(d->section))
        if (!isec || !isec->getFile<ELFT>() || isec->getFile<ELFT>()->splitStack)
          continue;

    // Only attempt one adjustment per enclosing function.
    if (enclosingPrologueAttempted(rel.offset, prologues))
      continue;

    if (Defined *f = getEnclosingFunction(rel.offset)) {
      prologues.insert(f);
      if (target->adjustPrologueForCrossSplitStack(buf + f->value, end,
                                                   f->stOther))
        continue;
      // Adjustment failed; only an error if the file doesn't contain any
      // no-split-stack functions that would explain the mix.
      if (!getFile<ELFT>()->someNoSplitStack)
        error(lld::toString(this) + ": " + f->getName() +
              " (with -fsplit-stack) calls " + rel.sym->getName() +
              " (without -fsplit-stack), but couldn't adjust its prologue");
    }
  }

  if (target->needsMoreStackNonSplit)
    switchMorestackCallsToMorestackNonSplit(prologues, morestackCalls);
}
1212
1213
// Writes this section's (possibly decompressed) contents to buf and then
// applies relocations in place.
template <class ELFT> void InputSection::writeTo(uint8_t *buf) {
  if (LLVM_UNLIKELY(type == SHT_NOBITS))
    return;
  // If -r or --emit-relocs is given, then an InputSection
  // may be a relocation section.
  if (LLVM_UNLIKELY(type == SHT_RELA)) {
    copyRelocations<ELFT, typename ELFT::Rela>(buf);
    return;
  }
  if (LLVM_UNLIKELY(type == SHT_REL)) {
    copyRelocations<ELFT, typename ELFT::Rel>(buf);
    return;
  }

  // If -r is given, we may have a SHT_GROUP section.
  if (LLVM_UNLIKELY(type == SHT_GROUP)) {
    copyShtGroup<ELFT>(buf);
    return;
  }

  // If this is a compressed section, uncompress section contents directly
  // to the buffer.
  if (compressed) {
    // content_ starts with an ELF compression header describing the
    // algorithm; the payload follows it.
    auto *hdr = reinterpret_cast<const typename ELFT::Chdr *>(content_);
    auto compressed = ArrayRef<uint8_t>(content_, compressedSize)
                          .slice(sizeof(typename ELFT::Chdr));
    size_t size = this->size;
    if (Error e = hdr->ch_type == ELFCOMPRESS_ZLIB
                      ? compression::zlib::decompress(compressed, buf, size)
                      : compression::zstd::decompress(compressed, buf, size))
      fatal(toString(this) +
            ": decompress failed: " + llvm::toString(std::move(e)));
    uint8_t *bufEnd = buf + size;
    relocate<ELFT>(buf, bufEnd);
    return;
  }

  // Copy section contents from source object file to output file
  // and then apply relocations.
  memcpy(buf, content().data(), content().size());
  relocate<ELFT>(buf, buf + content().size());
}
1255
1256
// Replaces `other` with this section (e.g. after ICF folding), transferring
// its alignment requirement and marking it dead.
void InputSection::replace(InputSection *other) {
  // Keep the stricter of the two alignment requirements.
  if (other->addralign > addralign)
    addralign = other->addralign;

  // When a section is replaced with another section that was allocated to
  // another partition, the replacement section (and its associated sections)
  // need to be placed in the main partition so that both partitions will be
  // able to access it.
  if (other->partition != partition) {
    partition = 1;
    for (InputSection *dep : dependentSections)
      dep->partition = 1;
  }

  other->repl = repl;
  other->markDead();
}
1272
1273
// Constructs an EhInputSection (.eh_frame input) by delegating to the base
// class with the EHFrame section kind tag.
template <class ELFT>
EhInputSection::EhInputSection(ObjFile<ELFT> &f,
                               const typename ELFT::Shdr &header,
                               StringRef name)
    : InputSectionBase(f, header, name, InputSectionBase::EHFrame) {}
1278
1279
// Returns the parent downcast to a SyntheticSection, or null when unset
// or of a different kind.
SyntheticSection *EhInputSection::getParent() const {
  auto *p = parent;
  return cast_or_null<SyntheticSection>(p);
}
1282
1283
// .eh_frame is a sequence of CIE or FDE records.
1284
// This function splits an input section into records and returns them.
1285
template <class ELFT> void EhInputSection::split() {
1286
const RelsOrRelas<ELFT> rels = relsOrRelas<ELFT>(/*supportsCrel=*/false);
1287
// getReloc expects the relocations to be sorted by r_offset. See the comment
1288
// in scanRelocs.
1289
if (rels.areRelocsRel()) {
1290
SmallVector<typename ELFT::Rel, 0> storage;
1291
split<ELFT>(sortRels(rels.rels, storage));
1292
} else {
1293
SmallVector<typename ELFT::Rela, 0> storage;
1294
split<ELFT>(sortRels(rels.relas, storage));
1295
}
1296
}
1297
1298
// Splits the .eh_frame contents into CIE and FDE pieces, recording for each
// piece its input offset, size, and the index of the first relocation that
// lands inside it. On malformed input, reports an error naming the record.
template <class ELFT, class RelTy>
void EhInputSection::split(ArrayRef<RelTy> rels) {
  ArrayRef<uint8_t> d = content();
  const char *msg = nullptr;
  unsigned relI = 0;
  while (!d.empty()) {
    if (d.size() < 4) {
      msg = "CIE/FDE too small";
      break;
    }
    // First 4 bytes: record length (excluding the length field itself,
    // hence the `size += 4` below).
    uint64_t size = endian::read32<ELFT::Endianness>(d.data());
    if (size == 0) // ZERO terminator
      break;
    // Next 4 bytes: CIE id; 0 marks a CIE, anything else an FDE.
    uint32_t id = endian::read32<ELFT::Endianness>(d.data() + 4);
    size += 4;
    if (LLVM_UNLIKELY(size > d.size())) {
      // If it is 0xFFFFFFFF, the next 8 bytes contain the size instead,
      // but we do not support that format yet.
      msg = size == UINT32_MAX + uint64_t(4)
                ? "CIE/FDE too large"
                : "CIE/FDE ends past the end of the section";
      break;
    }

    // Find the first relocation that points to [off,off+size). Relocations
    // have been sorted by r_offset.
    const uint64_t off = d.data() - content().data();
    while (relI != rels.size() && rels[relI].r_offset < off)
      ++relI;
    unsigned firstRel = -1;
    if (relI != rels.size() && rels[relI].r_offset < off + size)
      firstRel = relI;
    (id == 0 ? cies : fdes).emplace_back(off, this, size, firstRel);
    d = d.slice(size);
  }
  if (msg)
    errorOrWarn("corrupted .eh_frame: " + Twine(msg) + "\n>>> defined in " +
                getObjMsg(d.data() - content().data()));
}
1337
1338
// Return the offset in an output section for a given input offset.
uint64_t EhInputSection::getParentOffset(uint64_t offset) const {
  // Both fdes and cies are sorted by inputOff, so binary search with
  // partition_point: `it` points at the first piece starting after offset,
  // making it[-1] the candidate containing piece.
  auto it = partition_point(
      fdes, [=](EhSectionPiece p) { return p.inputOff <= offset; });
  // If no FDE contains the offset, retry against the CIE list.
  if (it == fdes.begin() || it[-1].inputOff + it[-1].size <= offset) {
    it = partition_point(
        cies, [=](EhSectionPiece p) { return p.inputOff <= offset; });
    if (it == cies.begin()) // invalid piece
      return offset;
  }
  if (it[-1].outputOff == -1) // invalid piece
    return offset - it[-1].inputOff;
  return it[-1].outputOff + (offset - it[-1].inputOff);
}
1352
1353
// Returns the offset of the first entSize-sized entry consisting entirely of
// zero bytes. The caller (splitStrings) has already verified that the data
// ends with such a terminator, so falling off the end is a programming error.
static size_t findNull(StringRef s, size_t entSize) {
  for (unsigned i = 0, n = s.size(); i != n; i += entSize) {
    const char *b = s.begin() + i;
    if (std::all_of(b, b + entSize, [](char c) { return c == 0; }))
      return i;
  }
  // Previously llvm_unreachable("") — give the assertion-build diagnostic a
  // meaningful message.
  llvm_unreachable("string is not null terminated");
}
1361
1362
// Split SHF_STRINGS section. Such section is a sequence of
// null-terminated strings.
void MergeInputSection::splitStrings(StringRef s, size_t entSize) {
  // Pieces start live unless they are candidates for --gc-sections.
  const bool live = !(flags & SHF_ALLOC) || !config->gcSections;
  const char *p = s.data(), *end = s.data() + s.size();
  // Validate up front that the final entry is a terminator; the loops below
  // rely on this to stop exactly at `end`.
  if (!std::all_of(end - entSize, end, [](char c) { return c == 0; }))
    fatal(toString(this) + ": string is not null terminated");
  if (entSize == 1) {
    // Optimize the common case.
    do {
      size_t size = strlen(p);
      pieces.emplace_back(p - s.begin(), xxh3_64bits(StringRef(p, size)), live);
      p += size + 1;
    } while (p != end);
  } else {
    // Wide entries: scan entSize bytes at a time for an all-zero terminator.
    do {
      size_t size = findNull(StringRef(p, end - p), entSize);
      pieces.emplace_back(p - s.begin(), xxh3_64bits(StringRef(p, size)), live);
      p += size + entSize;
    } while (p != end);
  }
}
1384
1385
// Split non-SHF_STRINGS section. Such section is a sequence of
1386
// fixed size records.
1387
void MergeInputSection::splitNonStrings(ArrayRef<uint8_t> data,
1388
size_t entSize) {
1389
size_t size = data.size();
1390
assert((size % entSize) == 0);
1391
const bool live = !(flags & SHF_ALLOC) || !config->gcSections;
1392
1393
pieces.resize_for_overwrite(size / entSize);
1394
for (size_t i = 0, j = 0; i != size; i += entSize, j++)
1395
pieces[j] = {i, (uint32_t)xxh3_64bits(data.slice(i, entSize)), live};
1396
}
1397
1398
// Constructs a MergeInputSection from an object file's section header,
// delegating to the base class with the Merge section kind tag.
template <class ELFT>
MergeInputSection::MergeInputSection(ObjFile<ELFT> &f,
                                     const typename ELFT::Shdr &header,
                                     StringRef name)
    : InputSectionBase(f, header, name, InputSectionBase::Merge) {}
1403
1404
// Constructs a synthetic (file-less) MergeInputSection from raw data; the
// entry size doubles as the alignment.
MergeInputSection::MergeInputSection(uint64_t flags, uint32_t type,
                                     uint64_t entsize, ArrayRef<uint8_t> data,
                                     StringRef name)
    : InputSectionBase(nullptr, flags, type, entsize, /*Link*/ 0, /*Info*/ 0,
                       /*Alignment*/ entsize, data, name, SectionBase::Merge) {}
1409
1410
// This function is called after we obtain a complete list of input sections
// that need to be linked. This is responsible to split section contents
// into small chunks for further processing.
//
// Note that this function is called from parallelForEach. This must be
// thread-safe (i.e. no memory allocation from the pools).
void MergeInputSection::splitIntoPieces() {
  assert(pieces.empty());

  // SHF_STRINGS sections hold null-terminated strings; others hold
  // fixed-size (entsize) records.
  if (flags & SHF_STRINGS)
    splitStrings(toStringRef(contentMaybeDecompress()), entsize);
  else
    splitNonStrings(contentMaybeDecompress(), entsize);
}
1424
1425
// Returns the piece containing the given input offset. Fatal error if the
// offset is out of range.
SectionPiece &MergeInputSection::getSectionPiece(uint64_t offset) {
  if (content().size() <= offset)
    fatal(toString(this) + ": offset is outside the section");
  // pieces is sorted by inputOff; partition_point yields the first piece
  // starting after offset, so [-1] is the piece that contains it.
  return partition_point(
      pieces, [=](SectionPiece p) { return p.inputOff <= offset; })[-1];
}
1431
1432
// Return the offset in an output section for a given input offset.
1433
uint64_t MergeInputSection::getParentOffset(uint64_t offset) const {
1434
const SectionPiece &piece = getSectionPiece(offset);
1435
return piece.outputOff + (offset - piece.inputOff);
1436
}
1437
1438
// Explicit template instantiations for the four supported ELF flavors
// (32/64-bit, little/big-endian), so the definitions above can live in this
// translation unit.
template InputSection::InputSection(ObjFile<ELF32LE> &, const ELF32LE::Shdr &,
                                    StringRef);
template InputSection::InputSection(ObjFile<ELF32BE> &, const ELF32BE::Shdr &,
                                    StringRef);
template InputSection::InputSection(ObjFile<ELF64LE> &, const ELF64LE::Shdr &,
                                    StringRef);
template InputSection::InputSection(ObjFile<ELF64BE> &, const ELF64BE::Shdr &,
                                    StringRef);

template void InputSection::writeTo<ELF32LE>(uint8_t *);
template void InputSection::writeTo<ELF32BE>(uint8_t *);
template void InputSection::writeTo<ELF64LE>(uint8_t *);
template void InputSection::writeTo<ELF64BE>(uint8_t *);

template RelsOrRelas<ELF32LE>
InputSectionBase::relsOrRelas<ELF32LE>(bool) const;
template RelsOrRelas<ELF32BE>
InputSectionBase::relsOrRelas<ELF32BE>(bool) const;
template RelsOrRelas<ELF64LE>
InputSectionBase::relsOrRelas<ELF64LE>(bool) const;
template RelsOrRelas<ELF64BE>
InputSectionBase::relsOrRelas<ELF64BE>(bool) const;

template MergeInputSection::MergeInputSection(ObjFile<ELF32LE> &,
                                              const ELF32LE::Shdr &, StringRef);
template MergeInputSection::MergeInputSection(ObjFile<ELF32BE> &,
                                              const ELF32BE::Shdr &, StringRef);
template MergeInputSection::MergeInputSection(ObjFile<ELF64LE> &,
                                              const ELF64LE::Shdr &, StringRef);
template MergeInputSection::MergeInputSection(ObjFile<ELF64BE> &,
                                              const ELF64BE::Shdr &, StringRef);

template EhInputSection::EhInputSection(ObjFile<ELF32LE> &,
                                        const ELF32LE::Shdr &, StringRef);
template EhInputSection::EhInputSection(ObjFile<ELF32BE> &,
                                        const ELF32BE::Shdr &, StringRef);
template EhInputSection::EhInputSection(ObjFile<ELF64LE> &,
                                        const ELF64LE::Shdr &, StringRef);
template EhInputSection::EhInputSection(ObjFile<ELF64BE> &,
                                        const ELF64BE::Shdr &, StringRef);

template void EhInputSection::split<ELF32LE>();
template void EhInputSection::split<ELF32BE>();
template void EhInputSection::split<ELF64LE>();
template void EhInputSection::split<ELF64BE>();