Path: blob/main/contrib/llvm-project/lld/MachO/SyntheticSections.cpp
//===- SyntheticSections.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "SyntheticSections.h"
#include "ConcatOutputSection.h"
#include "Config.h"
#include "ExportTrie.h"
#include "InputFiles.h"
#include "MachOStructs.h"
#include "ObjC.h"
#include "OutputSegment.h"
#include "SymbolTable.h"
#include "Symbols.h"

#include "lld/Common/CommonLinkerContext.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/Parallel.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/xxhash.h"

#if defined(__APPLE__)
#include <sys/mman.h>

#define COMMON_DIGEST_FOR_OPENSSL
#include <CommonCrypto/CommonDigest.h>
#else
#include "llvm/Support/SHA256.h"
#endif

using namespace llvm;
using namespace llvm::MachO;
using namespace llvm::support;
using namespace llvm::support::endian;
using namespace lld;
using namespace lld::macho;

// Reads `len` bytes at data and writes the 32-byte SHA256 checksum to `output`.
static void sha256(const uint8_t *data, size_t len, uint8_t *output) {
#if defined(__APPLE__)
  // FIXME: Make LLVM's SHA256 faster and use it unconditionally. See PR56121
  // for some notes on this.
  CC_SHA256(data, len, output);
#else
  ArrayRef<uint8_t> block(data, len);
  std::array<uint8_t, 32> hash = SHA256::hash(block);
  static_assert(hash.size() == CodeSignatureSection::hashSize);
  memcpy(output, hash.data(), hash.size());
#endif
}

InStruct macho::in;
std::vector<SyntheticSection *> macho::syntheticSections;

SyntheticSection::SyntheticSection(const char *segname, const char *name)
    : OutputSection(SyntheticKind, name) {
  std::tie(this->segname, this->name) = maybeRenameSection({segname, name});
  isec = makeSyntheticInputSection(segname, name);
  isec->parent = this;
  syntheticSections.push_back(this);
}

// dyld3's MachOLoaded::getSlide() assumes that the __TEXT segment starts
// from the beginning of the file (i.e. the header).
MachHeaderSection::MachHeaderSection()
    : SyntheticSection(segment_names::text, section_names::header) {
  // XXX: This is a hack. (See D97007)
  // Setting the index to 1 to pretend that this section is the text
  // section.
  index = 1;
  isec->isFinal = true;
}

void MachHeaderSection::addLoadCommand(LoadCommand *lc) {
  loadCommands.push_back(lc);
  sizeOfCmds += lc->getSize();
}

uint64_t MachHeaderSection::getSize() const {
  uint64_t size = target->headerSize + sizeOfCmds + config->headerPad;
  // If we are emitting an encryptable binary, our load commands must have a
  // separate (non-encrypted) page to themselves.
  if (config->emitEncryptionInfo)
    size = alignToPowerOf2(size, target->getPageSize());
  return size;
}

static uint32_t cpuSubtype() {
  uint32_t subtype = target->cpuSubtype;

  if (config->outputType == MH_EXECUTE && !config->staticLink &&
      target->cpuSubtype == CPU_SUBTYPE_X86_64_ALL &&
      config->platform() == PLATFORM_MACOS &&
      config->platformInfo.target.MinDeployment >= VersionTuple(10, 5))
    subtype |= CPU_SUBTYPE_LIB64;

  return subtype;
}

static bool hasWeakBinding() {
  return config->emitChainedFixups ? in.chainedFixups->hasWeakBinding()
                                   : in.weakBinding->hasEntry();
}

static bool hasNonWeakDefinition() {
  return config->emitChainedFixups ? in.chainedFixups->hasNonWeakDefinition()
                                   : in.weakBinding->hasNonWeakDefinition();
}

void MachHeaderSection::writeTo(uint8_t *buf) const {
  auto *hdr = reinterpret_cast<mach_header *>(buf);
  hdr->magic = target->magic;
  hdr->cputype = target->cpuType;
  hdr->cpusubtype = cpuSubtype();
  hdr->filetype = config->outputType;
  hdr->ncmds = loadCommands.size();
  hdr->sizeofcmds = sizeOfCmds;
  hdr->flags = MH_DYLDLINK;

  if (config->namespaceKind == NamespaceKind::twolevel)
    hdr->flags |= MH_NOUNDEFS | MH_TWOLEVEL;

  if (config->outputType == MH_DYLIB && !config->hasReexports)
    hdr->flags |= MH_NO_REEXPORTED_DYLIBS;

  if (config->markDeadStrippableDylib)
    hdr->flags |= MH_DEAD_STRIPPABLE_DYLIB;

  if (config->outputType == MH_EXECUTE && config->isPic)
    hdr->flags |= MH_PIE;

  if (config->outputType == MH_DYLIB && config->applicationExtension)
    hdr->flags |= MH_APP_EXTENSION_SAFE;

  if (in.exports->hasWeakSymbol || hasNonWeakDefinition())
    hdr->flags |= MH_WEAK_DEFINES;

  if (in.exports->hasWeakSymbol || hasWeakBinding())
    hdr->flags |= MH_BINDS_TO_WEAK;

  for (const OutputSegment *seg : outputSegments) {
    for (const OutputSection *osec : seg->getSections()) {
      if (isThreadLocalVariables(osec->flags)) {
        hdr->flags |= MH_HAS_TLV_DESCRIPTORS;
        break;
      }
    }
  }

  uint8_t *p = reinterpret_cast<uint8_t *>(hdr) + target->headerSize;
  for (const LoadCommand *lc : loadCommands) {
    lc->writeTo(p);
    p += lc->getSize();
  }
}

PageZeroSection::PageZeroSection()
    : SyntheticSection(segment_names::pageZero, section_names::pageZero) {}

RebaseSection::RebaseSection()
    : LinkEditSection(segment_names::linkEdit, section_names::rebase) {}

namespace {
struct RebaseState {
  uint64_t sequenceLength;
  uint64_t skipLength;
};
} // namespace

static void emitIncrement(uint64_t incr, raw_svector_ostream &os) {
  assert(incr != 0);

  if ((incr >> target->p2WordSize) <= REBASE_IMMEDIATE_MASK &&
      (incr % target->wordSize) == 0) {
    os << static_cast<uint8_t>(REBASE_OPCODE_ADD_ADDR_IMM_SCALED |
                               (incr >> target->p2WordSize));
  } else {
    os << static_cast<uint8_t>(REBASE_OPCODE_ADD_ADDR_ULEB);
    encodeULEB128(incr, os);
  }
}

static void flushRebase(const RebaseState &state, raw_svector_ostream &os) {
  assert(state.sequenceLength > 0);

  if (state.skipLength == target->wordSize) {
    if (state.sequenceLength <= REBASE_IMMEDIATE_MASK) {
      os << static_cast<uint8_t>(REBASE_OPCODE_DO_REBASE_IMM_TIMES |
                                 state.sequenceLength);
    } else {
      os << static_cast<uint8_t>(REBASE_OPCODE_DO_REBASE_ULEB_TIMES);
      encodeULEB128(state.sequenceLength, os);
    }
  } else if (state.sequenceLength == 1) {
    os << static_cast<uint8_t>(REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB);
    encodeULEB128(state.skipLength - target->wordSize, os);
  } else {
    os << static_cast<uint8_t>(
        REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB);
    encodeULEB128(state.sequenceLength, os);
    encodeULEB128(state.skipLength - target->wordSize, os);
  }
}

// Rebases are communicated to dyld using a bytecode, whose opcodes cause the
// memory location at a specific address to be rebased and/or the address to be
// incremented.
//
// Opcode REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB is the most generic
// one, encoding a series of evenly spaced addresses. This algorithm works by
// splitting up the sorted list of addresses into such chunks. If the locations
// are consecutive or the sequence consists of a single location, flushRebase
// will use a smaller, more specialized encoding.
static void encodeRebases(const OutputSegment *seg,
                          MutableArrayRef<Location> locations,
                          raw_svector_ostream &os) {
  // dyld operates on segments. Translate section offsets into segment offsets.
  for (Location &loc : locations)
    loc.offset =
        loc.isec->parent->getSegmentOffset() + loc.isec->getOffset(loc.offset);
  // The algorithm assumes that locations are unique.
  Location *end =
      llvm::unique(locations, [](const Location &a, const Location &b) {
        return a.offset == b.offset;
      });
  size_t count = end - locations.begin();

  os << static_cast<uint8_t>(REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB |
                             seg->index);
  assert(!locations.empty());
  uint64_t offset = locations[0].offset;
  encodeULEB128(offset, os);

  RebaseState state{1, target->wordSize};

  for (size_t i = 1; i < count; ++i) {
    offset = locations[i].offset;

    uint64_t skip = offset - locations[i - 1].offset;
    assert(skip != 0 && "duplicate locations should have been weeded out");

    if (skip == state.skipLength) {
      ++state.sequenceLength;
    } else if (state.sequenceLength == 1) {
      ++state.sequenceLength;
      state.skipLength = skip;
    } else if (skip < state.skipLength) {
      // The address is lower than what the rebase pointer would be if the last
      // location would be part of a sequence. We start a new sequence from the
      // previous location.
      --state.sequenceLength;
      flushRebase(state, os);

      state.sequenceLength = 2;
      state.skipLength = skip;
    } else {
      // The address is at some positive offset from the rebase pointer. We
      // start a new sequence which begins with the current location.
      flushRebase(state, os);
      emitIncrement(skip - state.skipLength, os);
      state.sequenceLength = 1;
      state.skipLength = target->wordSize;
    }
  }
  flushRebase(state, os);
}

void RebaseSection::finalizeContents() {
  if (locations.empty())
    return;

  raw_svector_ostream os{contents};
  os << static_cast<uint8_t>(REBASE_OPCODE_SET_TYPE_IMM | REBASE_TYPE_POINTER);

  llvm::sort(locations, [](const Location &a, const Location &b) {
    return a.isec->getVA(a.offset) < b.isec->getVA(b.offset);
  });

  for (size_t i = 0, count = locations.size(); i < count;) {
    const OutputSegment *seg = locations[i].isec->parent->parent;
    size_t j = i + 1;
    while (j < count && locations[j].isec->parent->parent == seg)
      ++j;
    encodeRebases(seg, {locations.data() + i, locations.data() + j}, os);
    i = j;
  }
  os << static_cast<uint8_t>(REBASE_OPCODE_DONE);
}

void RebaseSection::writeTo(uint8_t *buf) const {
  memcpy(buf, contents.data(), contents.size());
}

NonLazyPointerSectionBase::NonLazyPointerSectionBase(const char *segname,
                                                     const char *name)
    : SyntheticSection(segname, name) {
  align = target->wordSize;
}

void macho::addNonLazyBindingEntries(const Symbol *sym,
                                     const InputSection *isec, uint64_t offset,
                                     int64_t addend) {
  if (config->emitChainedFixups) {
    if (needsBinding(sym))
      in.chainedFixups->addBinding(sym, isec, offset, addend);
    else if (isa<Defined>(sym))
      in.chainedFixups->addRebase(isec, offset);
    else
      llvm_unreachable("cannot bind to an undefined symbol");
    return;
  }

  if (const auto *dysym = dyn_cast<DylibSymbol>(sym)) {
    in.binding->addEntry(dysym, isec, offset, addend);
    if (dysym->isWeakDef())
      in.weakBinding->addEntry(sym, isec, offset, addend);
  } else if (const auto *defined = dyn_cast<Defined>(sym)) {
    in.rebase->addEntry(isec, offset);
    if (defined->isExternalWeakDef())
      in.weakBinding->addEntry(sym, isec, offset, addend);
    else if (defined->interposable)
      in.binding->addEntry(sym, isec, offset, addend);
  } else {
    // Undefined symbols are filtered out in scanRelocations(); we should never
    // get here
    llvm_unreachable("cannot bind to an undefined symbol");
  }
}

void NonLazyPointerSectionBase::addEntry(Symbol *sym) {
  if (entries.insert(sym)) {
    assert(!sym->isInGot());
    sym->gotIndex = entries.size() - 1;

    addNonLazyBindingEntries(sym, isec, sym->gotIndex * target->wordSize);
  }
}

void macho::writeChainedRebase(uint8_t *buf, uint64_t targetVA) {
  assert(config->emitChainedFixups);
  assert(target->wordSize == 8 && "Only 64-bit platforms are supported");
  auto *rebase = reinterpret_cast<dyld_chained_ptr_64_rebase *>(buf);
  rebase->target = targetVA & 0xf'ffff'ffff;
  rebase->high8 = (targetVA >> 56);
  rebase->reserved = 0;
  rebase->next = 0;
  rebase->bind = 0;

  // The fixup format places a 64 GiB limit on the output's size.
  // Should we handle this gracefully?
  uint64_t encodedVA = rebase->target | ((uint64_t)rebase->high8 << 56);
  if (encodedVA != targetVA)
    error("rebase target address 0x" + Twine::utohexstr(targetVA) +
          " does not fit into chained fixup. Re-link with -no_fixup_chains");
}

static void writeChainedBind(uint8_t *buf, const Symbol *sym, int64_t addend) {
  assert(config->emitChainedFixups);
  assert(target->wordSize == 8 && "Only 64-bit platforms are supported");
  auto *bind = reinterpret_cast<dyld_chained_ptr_64_bind *>(buf);
  auto [ordinal, inlineAddend] = in.chainedFixups->getBinding(sym, addend);
  bind->ordinal = ordinal;
  bind->addend = inlineAddend;
  bind->reserved = 0;
  bind->next = 0;
  bind->bind = 1;
}

void macho::writeChainedFixup(uint8_t *buf, const Symbol *sym, int64_t addend) {
  if (needsBinding(sym))
    writeChainedBind(buf, sym, addend);
  else
    writeChainedRebase(buf, sym->getVA() + addend);
}

void NonLazyPointerSectionBase::writeTo(uint8_t *buf) const {
  if (config->emitChainedFixups) {
    for (const auto &[i, entry] : llvm::enumerate(entries))
      writeChainedFixup(&buf[i * target->wordSize], entry, 0);
  } else {
    for (const auto &[i, entry] : llvm::enumerate(entries))
      if (auto *defined = dyn_cast<Defined>(entry))
        write64le(&buf[i * target->wordSize], defined->getVA());
  }
}

GotSection::GotSection()
    : NonLazyPointerSectionBase(segment_names::data, section_names::got) {
  flags = S_NON_LAZY_SYMBOL_POINTERS;
}

TlvPointerSection::TlvPointerSection()
    : NonLazyPointerSectionBase(segment_names::data,
                                section_names::threadPtrs) {
  flags = S_THREAD_LOCAL_VARIABLE_POINTERS;
}

BindingSection::BindingSection()
    : LinkEditSection(segment_names::linkEdit, section_names::binding) {}

namespace {
struct Binding {
  OutputSegment *segment = nullptr;
  uint64_t offset = 0;
  int64_t addend = 0;
};
struct BindIR {
  // Default value of 0xF0 is not valid opcode and should make the program
  // scream instead of accidentally writing "valid" values.
  uint8_t opcode = 0xF0;
  uint64_t data = 0;
  uint64_t consecutiveCount = 0;
};
} // namespace

// Encode a sequence of opcodes that tell dyld to write the address of symbol +
// addend at osec->addr + outSecOff.
//
// The bind opcode "interpreter" remembers the values of each binding field, so
// we only need to encode the differences between bindings. Hence the use of
// lastBinding.
static void encodeBinding(const OutputSection *osec, uint64_t outSecOff,
                          int64_t addend, Binding &lastBinding,
                          std::vector<BindIR> &opcodes) {
  OutputSegment *seg = osec->parent;
  uint64_t offset = osec->getSegmentOffset() + outSecOff;
  if (lastBinding.segment != seg) {
    opcodes.push_back(
        {static_cast<uint8_t>(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB |
                              seg->index),
         offset});
    lastBinding.segment = seg;
    lastBinding.offset = offset;
  } else if (lastBinding.offset != offset) {
    opcodes.push_back({BIND_OPCODE_ADD_ADDR_ULEB, offset - lastBinding.offset});
    lastBinding.offset = offset;
  }

  if (lastBinding.addend != addend) {
    opcodes.push_back(
        {BIND_OPCODE_SET_ADDEND_SLEB, static_cast<uint64_t>(addend)});
    lastBinding.addend = addend;
  }

  opcodes.push_back({BIND_OPCODE_DO_BIND, 0});
  // DO_BIND causes dyld to both perform the binding and increment the offset
  lastBinding.offset += target->wordSize;
}

static void optimizeOpcodes(std::vector<BindIR> &opcodes) {
  // Pass 1: Combine bind/add pairs
  size_t i;
  int pWrite = 0;
  for (i = 1; i < opcodes.size(); ++i, ++pWrite) {
    if ((opcodes[i].opcode == BIND_OPCODE_ADD_ADDR_ULEB) &&
        (opcodes[i - 1].opcode == BIND_OPCODE_DO_BIND)) {
      opcodes[pWrite].opcode = BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB;
      opcodes[pWrite].data = opcodes[i].data;
      ++i;
    } else {
      opcodes[pWrite] = opcodes[i - 1];
    }
  }
  if (i == opcodes.size())
    opcodes[pWrite] = opcodes[i - 1];
  opcodes.resize(pWrite + 1);

  // Pass 2: Compress two or more bind_add opcodes
  pWrite = 0;
  for (i = 1; i < opcodes.size(); ++i, ++pWrite) {
    if ((opcodes[i].opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB) &&
        (opcodes[i - 1].opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB) &&
        (opcodes[i].data == opcodes[i - 1].data)) {
      opcodes[pWrite].opcode = BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB;
      opcodes[pWrite].consecutiveCount = 2;
      opcodes[pWrite].data = opcodes[i].data;
      ++i;
      while (i < opcodes.size() &&
             (opcodes[i].opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB) &&
             (opcodes[i].data == opcodes[i - 1].data)) {
        opcodes[pWrite].consecutiveCount++;
        ++i;
      }
    } else {
      opcodes[pWrite] = opcodes[i - 1];
    }
  }
  if (i == opcodes.size())
    opcodes[pWrite] = opcodes[i - 1];
  opcodes.resize(pWrite + 1);

  // Pass 3: Use immediate encodings
  // Every binding is the size of one pointer. If the next binding is a
  // multiple of wordSize away that is within BIND_IMMEDIATE_MASK, the
  // opcode can be scaled by wordSize into a single byte and dyld will
  // expand it to the correct address.
  for (auto &p : opcodes) {
    // It's unclear why the check needs to be less than BIND_IMMEDIATE_MASK,
    // but ld64 currently does this. This could be a potential bug, but
    // for now, perform the same behavior to prevent mysterious bugs.
    if ((p.opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB) &&
        ((p.data / target->wordSize) < BIND_IMMEDIATE_MASK) &&
        ((p.data % target->wordSize) == 0)) {
      p.opcode = BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED;
      p.data /= target->wordSize;
    }
  }
}

static void flushOpcodes(const BindIR &op, raw_svector_ostream &os) {
  uint8_t opcode = op.opcode & BIND_OPCODE_MASK;
  switch (opcode) {
  case BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB:
  case BIND_OPCODE_ADD_ADDR_ULEB:
  case BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB:
    os << op.opcode;
    encodeULEB128(op.data, os);
    break;
  case BIND_OPCODE_SET_ADDEND_SLEB:
    os << op.opcode;
    encodeSLEB128(static_cast<int64_t>(op.data), os);
    break;
  case BIND_OPCODE_DO_BIND:
    os << op.opcode;
    break;
  case BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB:
    os << op.opcode;
    encodeULEB128(op.consecutiveCount, os);
    encodeULEB128(op.data, os);
    break;
  case BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED:
    os << static_cast<uint8_t>(op.opcode | op.data);
    break;
  default:
    llvm_unreachable("cannot bind to an unrecognized symbol");
  }
}

static bool needsWeakBind(const Symbol &sym) {
  if (auto *dysym = dyn_cast<DylibSymbol>(&sym))
    return dysym->isWeakDef();
  if (auto *defined = dyn_cast<Defined>(&sym))
    return defined->isExternalWeakDef();
  return false;
}

// Non-weak bindings need to have their dylib ordinal encoded as well.
static int16_t ordinalForDylibSymbol(const DylibSymbol &dysym) {
  if (config->namespaceKind == NamespaceKind::flat || dysym.isDynamicLookup())
    return static_cast<int16_t>(BIND_SPECIAL_DYLIB_FLAT_LOOKUP);
  assert(dysym.getFile()->isReferenced());
  return dysym.getFile()->ordinal;
}

static int16_t ordinalForSymbol(const Symbol &sym) {
  if (config->emitChainedFixups && needsWeakBind(sym))
    return BIND_SPECIAL_DYLIB_WEAK_LOOKUP;
  if (const auto *dysym = dyn_cast<DylibSymbol>(&sym))
    return ordinalForDylibSymbol(*dysym);
  assert(cast<Defined>(&sym)->interposable);
  return BIND_SPECIAL_DYLIB_FLAT_LOOKUP;
}

static void encodeDylibOrdinal(int16_t ordinal, raw_svector_ostream &os) {
  if (ordinal <= 0) {
    os << static_cast<uint8_t>(BIND_OPCODE_SET_DYLIB_SPECIAL_IMM |
                               (ordinal & BIND_IMMEDIATE_MASK));
  } else if (ordinal <= BIND_IMMEDIATE_MASK) {
    os << static_cast<uint8_t>(BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | ordinal);
  } else {
    os << static_cast<uint8_t>(BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB);
    encodeULEB128(ordinal, os);
  }
}

static void encodeWeakOverride(const Defined *defined,
                               raw_svector_ostream &os) {
  os << static_cast<uint8_t>(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM |
                             BIND_SYMBOL_FLAGS_NON_WEAK_DEFINITION)
     << defined->getName() << '\0';
}

// Organize the bindings so we can encode them with fewer opcodes.
//
// First, all bindings for a given symbol should be grouped together.
// BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM is the largest opcode (since it
// has an associated symbol string), so we only want to emit it once per symbol.
//
// Within each group, we sort the bindings by address. Since bindings are
// delta-encoded, sorting them allows for a more compact result. Note that
// sorting by address alone ensures that bindings for the same segment / section
// are located together, minimizing the number of times we have to emit
// BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB.
//
// Finally, we sort the symbols by the address of their first binding, again
// to facilitate the delta-encoding process.
template <class Sym>
std::vector<std::pair<const Sym *, std::vector<BindingEntry>>>
sortBindings(const BindingsMap<const Sym *> &bindingsMap) {
  std::vector<std::pair<const Sym *, std::vector<BindingEntry>>> bindingsVec(
      bindingsMap.begin(), bindingsMap.end());
  for (auto &p : bindingsVec) {
    std::vector<BindingEntry> &bindings = p.second;
    llvm::sort(bindings, [](const BindingEntry &a, const BindingEntry &b) {
      return a.target.getVA() < b.target.getVA();
    });
  }
  llvm::sort(bindingsVec, [](const auto &a, const auto &b) {
    return a.second[0].target.getVA() < b.second[0].target.getVA();
  });
  return bindingsVec;
}

// Emit bind opcodes, which are a stream of byte-sized opcodes that dyld
// interprets to update a record with the following fields:
// * segment index (of the segment to write the symbol addresses to, typically
//   the __DATA_CONST segment which contains the GOT)
// * offset within the segment, indicating the next location to write a binding
// * symbol type
// * symbol library ordinal (the index of its library's LC_LOAD_DYLIB command)
// * symbol name
// * addend
// When dyld sees BIND_OPCODE_DO_BIND, it uses the current record state to bind
// a symbol in the GOT, and increments the segment offset to point to the next
// entry. It does *not* clear the record state after doing the bind, so
// subsequent opcodes only need to encode the differences between bindings.
void BindingSection::finalizeContents() {
  raw_svector_ostream os{contents};
  Binding lastBinding;
  int16_t lastOrdinal = 0;

  for (auto &p : sortBindings(bindingsMap)) {
    const Symbol *sym = p.first;
    std::vector<BindingEntry> &bindings = p.second;
    uint8_t flags = BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM;
    if (sym->isWeakRef())
      flags |= BIND_SYMBOL_FLAGS_WEAK_IMPORT;
    os << flags << sym->getName() << '\0'
       << static_cast<uint8_t>(BIND_OPCODE_SET_TYPE_IMM | BIND_TYPE_POINTER);
    int16_t ordinal = ordinalForSymbol(*sym);
    if (ordinal != lastOrdinal) {
      encodeDylibOrdinal(ordinal, os);
      lastOrdinal = ordinal;
    }
    std::vector<BindIR> opcodes;
    for (const BindingEntry &b : bindings)
      encodeBinding(b.target.isec->parent,
                    b.target.isec->getOffset(b.target.offset), b.addend,
                    lastBinding, opcodes);
    if (config->optimize > 1)
      optimizeOpcodes(opcodes);
    for (const auto &op : opcodes)
      flushOpcodes(op, os);
  }
  if (!bindingsMap.empty())
    os << static_cast<uint8_t>(BIND_OPCODE_DONE);
}

void BindingSection::writeTo(uint8_t *buf) const {
  memcpy(buf, contents.data(), contents.size());
}

WeakBindingSection::WeakBindingSection()
    : LinkEditSection(segment_names::linkEdit, section_names::weakBinding) {}

void WeakBindingSection::finalizeContents() {
  raw_svector_ostream os{contents};
  Binding lastBinding;

  for (const Defined *defined : definitions)
    encodeWeakOverride(defined, os);

  for (auto &p : sortBindings(bindingsMap)) {
    const Symbol *sym = p.first;
    std::vector<BindingEntry> &bindings = p.second;
    os << static_cast<uint8_t>(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM)
       << sym->getName() << '\0'
       << static_cast<uint8_t>(BIND_OPCODE_SET_TYPE_IMM | BIND_TYPE_POINTER);
    std::vector<BindIR> opcodes;
    for (const BindingEntry &b : bindings)
      encodeBinding(b.target.isec->parent,
                    b.target.isec->getOffset(b.target.offset), b.addend,
                    lastBinding, opcodes);
    if (config->optimize > 1)
      optimizeOpcodes(opcodes);
    for (const auto &op : opcodes)
      flushOpcodes(op, os);
  }
  if (!bindingsMap.empty() || !definitions.empty())
    os << static_cast<uint8_t>(BIND_OPCODE_DONE);
}

void WeakBindingSection::writeTo(uint8_t *buf) const {
  memcpy(buf, contents.data(), contents.size());
}

StubsSection::StubsSection()
    : SyntheticSection(segment_names::text, section_names::stubs) {
  flags = S_SYMBOL_STUBS | S_ATTR_SOME_INSTRUCTIONS | S_ATTR_PURE_INSTRUCTIONS;
  // The stubs section comprises machine instructions, which are aligned to
  // 4 bytes on the archs we care about.
  align = 4;
  reserved2 = target->stubSize;
}

uint64_t StubsSection::getSize() const {
  return entries.size() * target->stubSize;
}

void StubsSection::writeTo(uint8_t *buf) const {
  size_t off = 0;
  for (const Symbol *sym : entries) {
    uint64_t pointerVA =
        config->emitChainedFixups ? sym->getGotVA() : sym->getLazyPtrVA();
    target->writeStub(buf + off, *sym, pointerVA);
    off += target->stubSize;
  }
}

void StubsSection::finalize() { isFinal = true; }

static void addBindingsForStub(Symbol *sym) {
  assert(!config->emitChainedFixups);
  if (auto *dysym = dyn_cast<DylibSymbol>(sym)) {
    if (sym->isWeakDef()) {
      in.binding->addEntry(dysym, in.lazyPointers->isec,
                           sym->stubsIndex * target->wordSize);
      in.weakBinding->addEntry(sym, in.lazyPointers->isec,
                               sym->stubsIndex * target->wordSize);
    } else {
      in.lazyBinding->addEntry(dysym);
    }
  } else if (auto *defined = dyn_cast<Defined>(sym)) {
    if (defined->isExternalWeakDef()) {
      in.rebase->addEntry(in.lazyPointers->isec,
                          sym->stubsIndex * target->wordSize);
      in.weakBinding->addEntry(sym, in.lazyPointers->isec,
                               sym->stubsIndex * target->wordSize);
    } else if (defined->interposable) {
      in.lazyBinding->addEntry(sym);
    } else {
      llvm_unreachable("invalid stub target");
    }
  } else {
    llvm_unreachable("invalid stub target symbol type");
  }
}

void StubsSection::addEntry(Symbol *sym) {
  bool inserted = entries.insert(sym);
  if (inserted) {
    sym->stubsIndex = entries.size() - 1;

    if (config->emitChainedFixups)
      in.got->addEntry(sym);
    else
      addBindingsForStub(sym);
  }
}

StubHelperSection::StubHelperSection()
    : SyntheticSection(segment_names::text, section_names::stubHelper) {
  flags = S_ATTR_SOME_INSTRUCTIONS | S_ATTR_PURE_INSTRUCTIONS;
  align = 4; // This section comprises machine instructions
}

uint64_t StubHelperSection::getSize() const {
  return target->stubHelperHeaderSize +
         in.lazyBinding->getEntries().size() * target->stubHelperEntrySize;
}

bool StubHelperSection::isNeeded() const { return in.lazyBinding->isNeeded(); }

void StubHelperSection::writeTo(uint8_t *buf) const {
  target->writeStubHelperHeader(buf);
  size_t off = target->stubHelperHeaderSize;
  for (const Symbol *sym : in.lazyBinding->getEntries()) {
    target->writeStubHelperEntry(buf + off, *sym, addr + off);
    off += target->stubHelperEntrySize;
  }
}

void StubHelperSection::setUp() {
  Symbol *binder = symtab->addUndefined("dyld_stub_binder", /*file=*/nullptr,
                                        /*isWeakRef=*/false);
  if (auto *undefined = dyn_cast<Undefined>(binder))
    treatUndefinedSymbol(*undefined,
                         "lazy binding (normally in libSystem.dylib)");

  // treatUndefinedSymbol() can replace binder with a DylibSymbol; re-check.
  stubBinder = dyn_cast_or_null<DylibSymbol>(binder);
  if (stubBinder == nullptr)
    return;

  in.got->addEntry(stubBinder);

  in.imageLoaderCache->parent =
      ConcatOutputSection::getOrCreateForInput(in.imageLoaderCache);
  addInputSection(in.imageLoaderCache);
  // Since this isn't in the symbol table or in any input file, the noDeadStrip
  // argument doesn't matter.
  dyldPrivate =
      make<Defined>("__dyld_private", nullptr, in.imageLoaderCache, 0, 0,
                    /*isWeakDef=*/false,
                    /*isExternal=*/false, /*isPrivateExtern=*/false,
                    /*includeInSymtab=*/true,
                    /*isReferencedDynamically=*/false,
                    /*noDeadStrip=*/false);
  dyldPrivate->used = true;
}

llvm::DenseMap<llvm::CachedHashStringRef, ConcatInputSection *>
    ObjCSelRefsHelper::methnameToSelref;
void ObjCSelRefsHelper::initialize() {
  // Do not fold selrefs without ICF.
  if (config->icfLevel == ICFLevel::none)
    return;

  // Search methnames already referenced in __objc_selrefs
  // Map the name to the corresponding selref entry
  // which we will reuse when creating objc stubs.
  for (ConcatInputSection *isec : inputSections) {
    if (isec->shouldOmitFromOutput())
      continue;
    if (isec->getName() != section_names::objcSelrefs)
      continue;
    // We expect a single relocation per selref entry to __objc_methname that
    // might be aggregated.
    assert(isec->relocs.size() == 1);
    auto Reloc = isec->relocs[0];
    if (const auto *sym = Reloc.referent.dyn_cast<Symbol *>()) {
      if (const auto *d = dyn_cast<Defined>(sym)) {
        auto *cisec = cast<CStringInputSection>(d->isec());
        auto methname = cisec->getStringRefAtOffset(d->value);
        methnameToSelref[CachedHashStringRef(methname)] = isec;
      }
    }
  }
}

void ObjCSelRefsHelper::cleanup() { methnameToSelref.clear(); }

ConcatInputSection *ObjCSelRefsHelper::makeSelRef(StringRef methname) {
  auto methnameOffset =
      in.objcMethnameSection->getStringOffset(methname).outSecOff;

  size_t wordSize = target->wordSize;
  uint8_t *selrefData = bAlloc().Allocate<uint8_t>(wordSize);
  write64le(selrefData, methnameOffset);
  ConcatInputSection *objcSelref =
      makeSyntheticInputSection(segment_names::data, section_names::objcSelrefs,
                                S_LITERAL_POINTERS | S_ATTR_NO_DEAD_STRIP,
                                ArrayRef<uint8_t>{selrefData, wordSize},
                                /*align=*/wordSize);
  assert(objcSelref->live);
  objcSelref->relocs.push_back({/*type=*/target->unsignedRelocType,
                                /*pcrel=*/false, /*length=*/3,
                                /*offset=*/0,
                                /*addend=*/static_cast<int64_t>(methnameOffset),
                                /*referent=*/in.objcMethnameSection->isec});
  objcSelref->parent = ConcatOutputSection::getOrCreateForInput(objcSelref);
  addInputSection(objcSelref);
  objcSelref->isFinal = true;
  methnameToSelref[CachedHashStringRef(methname)] = objcSelref;
  return objcSelref;
}

ConcatInputSection *ObjCSelRefsHelper::getSelRef(StringRef methname) {
  auto it = methnameToSelref.find(CachedHashStringRef(methname));
  if (it == methnameToSelref.end())
    return nullptr;
  return it->second;
}

ObjCStubsSection::ObjCStubsSection()
    : SyntheticSection(segment_names::text, section_names::objcStubs) {
  flags = S_ATTR_SOME_INSTRUCTIONS | S_ATTR_PURE_INSTRUCTIONS;
  align = config->objcStubsMode == ObjCStubsMode::fast
              ? target->objcStubsFastAlignment
              : target->objcStubsSmallAlignment;
}

bool ObjCStubsSection::isObjCStubSymbol(Symbol *sym) {
  return sym->getName().starts_with(symbolPrefix);
}

StringRef ObjCStubsSection::getMethname(Symbol *sym) {
  assert(isObjCStubSymbol(sym) && "not an objc stub");
  auto name = sym->getName();
  StringRef methname = name.drop_front(symbolPrefix.size());
  return methname;
}

void ObjCStubsSection::addEntry(Symbol *sym) {
  StringRef methname = getMethname(sym);
  // We create a selref entry for each unique methname.
  if (!ObjCSelRefsHelper::getSelRef(methname))
    ObjCSelRefsHelper::makeSelRef(methname);

  auto stubSize = config->objcStubsMode == ObjCStubsMode::fast
                      ? target->objcStubsFastSize
                      : target->objcStubsSmallSize;
  Defined *newSym = replaceSymbol<Defined>(
      sym, sym->getName(), nullptr, isec,
      /*value=*/symbols.size() * stubSize,
      /*size=*/stubSize,
      /*isWeakDef=*/false, /*isExternal=*/true, /*isPrivateExtern=*/true,
      /*includeInSymtab=*/true, /*isReferencedDynamically=*/false,
      /*noDeadStrip=*/false);
  symbols.push_back(newSym);
}

void ObjCStubsSection::setUp() {
  objcMsgSend = symtab->addUndefined("_objc_msgSend", /*file=*/nullptr,
                                     /*isWeakRef=*/false);
  if (auto *undefined = dyn_cast<Undefined>(objcMsgSend))
    treatUndefinedSymbol(*undefined,
                         "lazy binding (normally in libobjc.dylib)");
  objcMsgSend->used = true;
  if (config->objcStubsMode == ObjCStubsMode::fast) {
    in.got->addEntry(objcMsgSend);
    assert(objcMsgSend->isInGot());
  } else {
    assert(config->objcStubsMode == ObjCStubsMode::small);
    // In line with ld64's behavior, when objc_msgSend is a direct symbol,
    // we directly reference it.
    // In other cases, typically when binding in libobjc.dylib,
    // we generate a stub to invoke objc_msgSend.
    if (!isa<Defined>(objcMsgSend))
      in.stubs->addEntry(objcMsgSend);
  }
}

uint64_t ObjCStubsSection::getSize() const {
  auto stubSize = config->objcStubsMode == ObjCStubsMode::fast
                      ? target->objcStubsFastSize
                      : target->objcStubsSmallSize;
  return stubSize * symbols.size();
}

void ObjCStubsSection::writeTo(uint8_t *buf) const {
  uint64_t stubOffset = 0;
  for (size_t i = 0, n = symbols.size(); i < n; ++i) {
    Defined *sym = symbols[i];

    auto methname = getMethname(sym);
    InputSection *selRef = ObjCSelRefsHelper::getSelRef(methname);
    assert(selRef != nullptr && "no selref for methname");
    auto selrefAddr = selRef->getVA(0);
    target->writeObjCMsgSendStub(buf + stubOffset, sym, in.objcStubs->addr,
                                 stubOffset, selrefAddr, objcMsgSend);
  }
}

LazyPointerSection::LazyPointerSection()
    : SyntheticSection(segment_names::data, section_names::lazySymbolPtr) {
  align = target->wordSize;
  flags = S_LAZY_SYMBOL_POINTERS;
}

uint64_t LazyPointerSection::getSize() const {
  return in.stubs->getEntries().size() * target->wordSize;
}

bool LazyPointerSection::isNeeded() const {
  return !in.stubs->getEntries().empty();
}

void LazyPointerSection::writeTo(uint8_t *buf) const {
  size_t off = 0;
  for (const Symbol *sym : in.stubs->getEntries()) {
    if (const auto *dysym = dyn_cast<DylibSymbol>(sym)) {
      if (dysym->hasStubsHelper()) {
        uint64_t stubHelperOffset =
            target->stubHelperHeaderSize +
            dysym->stubsHelperIndex * target->stubHelperEntrySize;
        write64le(buf + off, in.stubHelper->addr + stubHelperOffset);
      }
    } else {
      write64le(buf + off, sym->getVA());
    }
    off += target->wordSize;
  }
}

LazyBindingSection::LazyBindingSection()
    : LinkEditSection(segment_names::linkEdit, section_names::lazyBinding) {}

void LazyBindingSection::finalizeContents() {
  // TODO: Just precompute output size here instead of writing to a temporary
  // buffer
  for (Symbol *sym : entries)
    sym->lazyBindOffset = encode(*sym);
}

void LazyBindingSection::writeTo(uint8_t *buf) const {
  memcpy(buf, contents.data(), contents.size());
}

void LazyBindingSection::addEntry(Symbol *sym) {
  assert(!config->emitChainedFixups && "Chained fixups always bind eagerly");
  if (entries.insert(sym)) {
    sym->stubsHelperIndex = entries.size() - 1;
    in.rebase->addEntry(in.lazyPointers->isec,
                        sym->stubsIndex * target->wordSize);
  }
}

// Unlike the non-lazy binding section, the bind opcodes in this section aren't
// interpreted all at once. Rather, dyld will start interpreting opcodes at a
// given offset, typically only binding a single symbol before it finds a
// BIND_OPCODE_DONE terminator. As such, unlike in the non-lazy-binding case,
// we cannot encode just the differences between symbols; we have to emit the
// complete bind information for each symbol.
uint32_t LazyBindingSection::encode(const Symbol &sym) {
  uint32_t opstreamOffset = contents.size();
  OutputSegment *dataSeg = in.lazyPointers->parent;
  os << static_cast<uint8_t>(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB |
                             dataSeg->index);
  uint64_t offset =
      in.lazyPointers->addr - dataSeg->addr + sym.stubsIndex * target->wordSize;
  encodeULEB128(offset, os);
  encodeDylibOrdinal(ordinalForSymbol(sym), os);

  uint8_t flags = BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM;
  if (sym.isWeakRef())
    flags |= BIND_SYMBOL_FLAGS_WEAK_IMPORT;

  os << flags << sym.getName() << '\0'
     << static_cast<uint8_t>(BIND_OPCODE_DO_BIND)
     << static_cast<uint8_t>(BIND_OPCODE_DONE);
  return opstreamOffset;
}

ExportSection::ExportSection()
    : LinkEditSection(segment_names::linkEdit, section_names::export_) {}

void ExportSection::finalizeContents() {
  trieBuilder.setImageBase(in.header->addr);
  for (const Symbol *sym : symtab->getSymbols()) {
    if (const auto *defined = dyn_cast<Defined>(sym)) {
      if (defined->privateExtern || !defined->isLive())
        continue;
      trieBuilder.addSymbol(*defined);
      hasWeakSymbol = hasWeakSymbol || sym->isWeakDef();
    } else if (auto *dysym = dyn_cast<DylibSymbol>(sym)) {
      if (dysym->shouldReexport)
        trieBuilder.addSymbol(*dysym);
    }
  }
  size = trieBuilder.build();
}

void ExportSection::writeTo(uint8_t *buf) const { trieBuilder.writeTo(buf); }

DataInCodeSection::DataInCodeSection()
    : LinkEditSection(segment_names::linkEdit, section_names::dataInCode) {}

template <class LP>
static std::vector<MachO::data_in_code_entry> collectDataInCodeEntries() {
  std::vector<MachO::data_in_code_entry> dataInCodeEntries;
  for (const InputFile *inputFile : inputFiles) {
    if (!isa<ObjFile>(inputFile))
      continue;
    const ObjFile *objFile = cast<ObjFile>(inputFile);
    ArrayRef<MachO::data_in_code_entry> entries = objFile->getDataInCode();
    if (entries.empty())
      continue;

    std::vector<MachO::data_in_code_entry> sortedEntries;
    sortedEntries.assign(entries.begin(), entries.end());
    llvm::sort(sortedEntries, [](const data_in_code_entry &lhs,
                                 const data_in_code_entry &rhs) {
      return lhs.offset < rhs.offset;
    });

    // For each code subsection find 'data in code' entries residing in it.
    // Compute the new offset values as
    // <offset within subsection> + <subsection address> - <__TEXT address>.
    for (const Section *section : objFile->sections) {
      for (const Subsection &subsec : section->subsections) {
        const InputSection *isec = subsec.isec;
        if (!isCodeSection(isec))
          continue;
        if (cast<ConcatInputSection>(isec)->shouldOmitFromOutput())
          continue;
        const uint64_t beginAddr = section->addr + subsec.offset;
        auto it = llvm::lower_bound(
            sortedEntries, beginAddr,
            [](const MachO::data_in_code_entry &entry, uint64_t addr) {
              return entry.offset < addr;
            });
        const uint64_t endAddr = beginAddr + isec->getSize();
        for (const auto end = sortedEntries.end();
             it != end && it->offset + it->length <= endAddr; ++it)
          dataInCodeEntries.push_back(
              {static_cast<uint32_t>(isec->getVA(it->offset - beginAddr) -
                                     in.header->addr),
               it->length, it->kind});
      }
    }
  }

  // ld64 emits the table in sorted order too.
  llvm::sort(dataInCodeEntries,
             [](const data_in_code_entry &lhs, const data_in_code_entry &rhs) {
               return lhs.offset < rhs.offset;
             });
  return dataInCodeEntries;
}

void DataInCodeSection::finalizeContents() {
  entries = target->wordSize == 8 ? collectDataInCodeEntries<LP64>()
                                  : collectDataInCodeEntries<ILP32>();
}

void DataInCodeSection::writeTo(uint8_t *buf) const {
  if (!entries.empty())
    memcpy(buf, entries.data(), getRawSize());
}

FunctionStartsSection::FunctionStartsSection()
    : LinkEditSection(segment_names::linkEdit, section_names::functionStarts) {}

void FunctionStartsSection::finalizeContents() {
  raw_svector_ostream os{contents};
  std::vector<uint64_t> addrs;
  for (const InputFile *file : inputFiles) {
    if (auto *objFile = dyn_cast<ObjFile>(file)) {
      for (const Symbol *sym : objFile->symbols) {
        if (const auto *defined = dyn_cast_or_null<Defined>(sym)) {
          if (!defined->isec() || !isCodeSection(defined->isec()) ||
              !defined->isLive())
            continue;
          addrs.push_back(defined->getVA());
        }
      }
    }
  }
  llvm::sort(addrs);
  uint64_t addr = in.header->addr;
  for (uint64_t nextAddr : addrs) {
    uint64_t delta = nextAddr - addr;
    if (delta == 0)
      continue;
    encodeULEB128(delta, os);
    addr = nextAddr;
  }
  os << '\0';
}

void FunctionStartsSection::writeTo(uint8_t *buf) const {
  memcpy(buf, contents.data(), contents.size());
}

SymtabSection::SymtabSection(StringTableSection &stringTableSection)
    : LinkEditSection(segment_names::linkEdit, section_names::symbolTable),
      stringTableSection(stringTableSection) {}

void SymtabSection::emitBeginSourceStab(StringRef sourceFile) {
  StabsEntry stab(N_SO);
  stab.strx = stringTableSection.addString(saver().save(sourceFile));
  stabs.emplace_back(std::move(stab));
}

void SymtabSection::emitEndSourceStab() {
  StabsEntry stab(N_SO);
  stab.sect = 1;
  stabs.emplace_back(std::move(stab));
}

void SymtabSection::emitObjectFileStab(ObjFile *file) {
  StabsEntry stab(N_OSO);
  stab.sect = target->cpuSubtype;
  SmallString<261> path(!file->archiveName.empty() ? file->archiveName
                                                    : file->getName());
  std::error_code ec = sys::fs::make_absolute(path);
  if (ec)
    fatal("failed to get absolute path for " + path);

  if (!file->archiveName.empty())
    path.append({"(", file->getName(), ")"});

  StringRef adjustedPath = saver().save(path.str());
  adjustedPath.consume_front(config->osoPrefix);

  stab.strx = stringTableSection.addString(adjustedPath);
  stab.desc = 1;
  stab.value = file->modTime;
  stabs.emplace_back(std::move(stab));
}

void SymtabSection::emitEndFunStab(Defined *defined) {
  StabsEntry stab(N_FUN);
  stab.value = defined->size;
  stabs.emplace_back(std::move(stab));
}

void SymtabSection::emitStabs() {
  if (config->omitDebugInfo)
    return;

  for (const std::string &s : config->astPaths) {
    StabsEntry astStab(N_AST);
    astStab.strx = stringTableSection.addString(s);
    stabs.emplace_back(std::move(astStab));
  }

  // Cache the file ID for each symbol in an std::pair for faster sorting.
  using SortingPair = std::pair<Defined *, int>;
  std::vector<SortingPair> symbolsNeedingStabs;
  for (const SymtabEntry &entry :
       concat<SymtabEntry>(localSymbols, externalSymbols)) {
    Symbol *sym = entry.sym;
    assert(sym->isLive() &&
           "dead symbols should not be in localSymbols, externalSymbols");
    if (auto *defined = dyn_cast<Defined>(sym)) {
      // Excluded symbols should have been filtered out in finalizeContents().
      assert(defined->includeInSymtab);

      if (defined->isAbsolute())
        continue;

      // Constant-folded symbols go in the executable's symbol table, but don't
      // get a stabs entry unless --keep-icf-stabs flag is specified
      if (!config->keepICFStabs && defined->wasIdenticalCodeFolded)
        continue;

      ObjFile *file = defined->getObjectFile();
      if (!file || !file->compileUnit)
        continue;

      // We use 'originalIsec' to get the file id of the symbol since 'isec()'
      // might point to the merged ICF symbol's file
      symbolsNeedingStabs.emplace_back(defined,
                                       defined->originalIsec->getFile()->id);
    }
  }

  llvm::stable_sort(symbolsNeedingStabs,
                    [&](const SortingPair &a, const SortingPair &b) {
                      return a.second < b.second;
                    });

  // Emit STABS symbols so that dsymutil and/or the debugger can map address
  // regions in the final binary to the source and object files from which they
  // originated.
  InputFile *lastFile = nullptr;
  for (SortingPair &pair : symbolsNeedingStabs) {
    Defined *defined = pair.first;
    // We use 'originalIsec' of the symbol since we care about the actual origin
    // of the symbol, not the canonical location returned by `isec()`.
    InputSection *isec = defined->originalIsec;
    ObjFile *file = cast<ObjFile>(isec->getFile());

    if (lastFile == nullptr || lastFile != file) {
      if (lastFile != nullptr)
        emitEndSourceStab();
      lastFile = file;

      emitBeginSourceStab(file->sourceFile());
      emitObjectFileStab(file);
    }

    StabsEntry symStab;
    symStab.sect = isec->parent->index;
    symStab.strx = stringTableSection.addString(defined->getName());
    symStab.value = defined->getVA();

    if (isCodeSection(isec)) {
      symStab.type = N_FUN;
      stabs.emplace_back(std::move(symStab));
      emitEndFunStab(defined);
    } else {
      symStab.type = defined->isExternal() ? N_GSYM : N_STSYM;
      stabs.emplace_back(std::move(symStab));
    }
  }

  if (!stabs.empty())
    emitEndSourceStab();
}

void SymtabSection::finalizeContents() {
  auto addSymbol = [&](std::vector<SymtabEntry> &symbols, Symbol *sym) {
    uint32_t strx = stringTableSection.addString(sym->getName());
    symbols.push_back({sym, strx});
  };

  std::function<void(Symbol *)> localSymbolsHandler;
  switch (config->localSymbolsPresence) {
  case SymtabPresence::All:
    localSymbolsHandler = [&](Symbol *sym) { addSymbol(localSymbols, sym); };
    break;
  case SymtabPresence::None:
    localSymbolsHandler = [&](Symbol *) { /* Do nothing*/ };
    break;
  case SymtabPresence::SelectivelyIncluded:
    localSymbolsHandler = [&](Symbol *sym) {
      if (config->localSymbolPatterns.match(sym->getName()))
        addSymbol(localSymbols, sym);
    };
    break;
  case SymtabPresence::SelectivelyExcluded:
    localSymbolsHandler = [&](Symbol *sym) {
      if (!config->localSymbolPatterns.match(sym->getName()))
        addSymbol(localSymbols, sym);
    };
    break;
  }

  // Local symbols aren't in the SymbolTable, so we walk the list of object
  // files to gather them.
  // But if `-x` is set, then we don't need to. localSymbolsHandler() will do
  // the right thing regardless, but this check is a perf optimization because
  // iterating through all the input files and their symbols is expensive.
  if (config->localSymbolsPresence != SymtabPresence::None) {
    for (const InputFile *file : inputFiles) {
      if (auto *objFile = dyn_cast<ObjFile>(file)) {
        for (Symbol *sym : objFile->symbols) {
          if (auto *defined = dyn_cast_or_null<Defined>(sym)) {
            if (defined->isExternal() || !defined->isLive() ||
                !defined->includeInSymtab)
              continue;
            localSymbolsHandler(sym);
          }
        }
      }
    }
  }

  // __dyld_private is a local symbol too. It's linker-created and doesn't
  // exist in any object file.
  if (in.stubHelper && in.stubHelper->dyldPrivate)
    localSymbolsHandler(in.stubHelper->dyldPrivate);

  for (Symbol *sym : symtab->getSymbols()) {
    if (!sym->isLive())
      continue;
    if (auto *defined = dyn_cast<Defined>(sym)) {
      if (!defined->includeInSymtab)
        continue;
      assert(defined->isExternal());
      if (defined->privateExtern)
        localSymbolsHandler(defined);
      else
        addSymbol(externalSymbols, defined);
    } else if (auto *dysym = dyn_cast<DylibSymbol>(sym)) {
      if (dysym->isReferenced())
        addSymbol(undefinedSymbols, sym);
    }
  }

  emitStabs();
  uint32_t symtabIndex = stabs.size();
  for (const SymtabEntry &entry :
       concat<SymtabEntry>(localSymbols, externalSymbols, undefinedSymbols)) {
    entry.sym->symtabIndex = symtabIndex++;
  }
}

uint32_t SymtabSection::getNumSymbols() const {
  return stabs.size() + localSymbols.size() + externalSymbols.size() +
         undefinedSymbols.size();
}

// This serves to hide (type-erase) the template parameter from SymtabSection.
template <class LP> class SymtabSectionImpl final : public SymtabSection {
public:
  SymtabSectionImpl(StringTableSection &stringTableSection)
      : SymtabSection(stringTableSection) {}
  uint64_t getRawSize() const override;
  void writeTo(uint8_t *buf) const override;
};

template <class LP> uint64_t SymtabSectionImpl<LP>::getRawSize() const {
  return getNumSymbols() * sizeof(typename LP::nlist);
}

template <class LP> void SymtabSectionImpl<LP>::writeTo(uint8_t *buf) const {
  auto *nList = reinterpret_cast<typename LP::nlist *>(buf);
  // Emit the stabs entries before the "real" symbols. We cannot emit them
  // after as that would render Symbol::symtabIndex inaccurate.
  for (const StabsEntry &entry : stabs) {
    nList->n_strx = entry.strx;
    nList->n_type = entry.type;
    nList->n_sect = entry.sect;
    nList->n_desc = entry.desc;
    nList->n_value = entry.value;
    ++nList;
  }

  for (const SymtabEntry &entry : concat<const SymtabEntry>(
           localSymbols, externalSymbols, undefinedSymbols)) {
    nList->n_strx = entry.strx;
    // TODO populate n_desc with more flags
    if (auto *defined = dyn_cast<Defined>(entry.sym)) {
      uint8_t scope = 0;
      if (defined->privateExtern) {
        // Private external -- dylib scoped symbol.
        // Promote to non-external at link time.
        scope = N_PEXT;
      } else if (defined->isExternal()) {
        // Normal global symbol.
        scope = N_EXT;
      } else {
        // TU-local symbol from localSymbols.
        scope = 0;
      }

      if (defined->isAbsolute()) {
        nList->n_type = scope | N_ABS;
        nList->n_sect = NO_SECT;
        nList->n_value = defined->value;
      } else {
        nList->n_type = scope | N_SECT;
        nList->n_sect = defined->isec()->parent->index;
        // For the N_SECT symbol type, n_value is the address of the symbol
        nList->n_value = defined->getVA();
      }
      nList->n_desc |= defined->isExternalWeakDef() ? N_WEAK_DEF : 0;
      nList->n_desc |=
          defined->referencedDynamically ? REFERENCED_DYNAMICALLY : 0;
    } else if (auto *dysym = dyn_cast<DylibSymbol>(entry.sym)) {
      uint16_t n_desc = nList->n_desc;
      int16_t ordinal = ordinalForDylibSymbol(*dysym);
      if (ordinal == BIND_SPECIAL_DYLIB_FLAT_LOOKUP)
        SET_LIBRARY_ORDINAL(n_desc, DYNAMIC_LOOKUP_ORDINAL);
      else if (ordinal == BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE)
        SET_LIBRARY_ORDINAL(n_desc, EXECUTABLE_ORDINAL);
      else {
        assert(ordinal > 0);
        SET_LIBRARY_ORDINAL(n_desc, static_cast<uint8_t>(ordinal));
      }

      nList->n_type = N_EXT;
      n_desc |= dysym->isWeakDef() ? N_WEAK_DEF : 0;
      n_desc |= dysym->isWeakRef() ? N_WEAK_REF : 0;
      nList->n_desc = n_desc;
    }
    ++nList;
  }
}

template <class LP>
SymtabSection *
macho::makeSymtabSection(StringTableSection &stringTableSection) {
  return make<SymtabSectionImpl<LP>>(stringTableSection);
}

IndirectSymtabSection::IndirectSymtabSection()
    : LinkEditSection(segment_names::linkEdit,
                      section_names::indirectSymbolTable) {}

uint32_t IndirectSymtabSection::getNumSymbols() const {
  uint32_t size = in.got->getEntries().size() +
                  in.tlvPointers->getEntries().size() +
                  in.stubs->getEntries().size();
  if (!config->emitChainedFixups)
    size += in.stubs->getEntries().size();
  return size;
}

bool IndirectSymtabSection::isNeeded() const {
  return in.got->isNeeded() || in.tlvPointers->isNeeded() ||
         in.stubs->isNeeded();
}

void IndirectSymtabSection::finalizeContents() {
  uint32_t off = 0;
  in.got->reserved1 = off;
  off += in.got->getEntries().size();
  in.tlvPointers->reserved1 = off;
  off += in.tlvPointers->getEntries().size();
  in.stubs->reserved1 = off;
  if (in.lazyPointers) {
    off += in.stubs->getEntries().size();
    in.lazyPointers->reserved1 = off;
  }
}

static uint32_t indirectValue(const Symbol *sym) {
  if (sym->symtabIndex == UINT32_MAX || !needsBinding(sym))
    return INDIRECT_SYMBOL_LOCAL;
  return sym->symtabIndex;
}

void IndirectSymtabSection::writeTo(uint8_t *buf) const {
  uint32_t off = 0;
  for (const Symbol *sym : in.got->getEntries()) {
    write32le(buf + off * sizeof(uint32_t), indirectValue(sym));
    ++off;
  }
  for (const Symbol *sym : in.tlvPointers->getEntries()) {
    write32le(buf + off * sizeof(uint32_t), indirectValue(sym));
    ++off;
  }
  for (const Symbol *sym : in.stubs->getEntries()) {
    write32le(buf + off * sizeof(uint32_t), indirectValue(sym));
    ++off;
  }

  if (in.lazyPointers) {
    // There is a 1:1 correspondence between stubs and LazyPointerSection
    // entries. But giving __stubs and __la_symbol_ptr the same reserved1
    // (the offset into the indirect symbol table) so that they both refer
    // to the same range of offsets confuses `strip`, so write the stubs
    // symbol table offsets a second time.
    for (const Symbol *sym : in.stubs->getEntries()) {
      write32le(buf + off * sizeof(uint32_t), indirectValue(sym));
      ++off;
    }
  }
}

StringTableSection::StringTableSection()
    : LinkEditSection(segment_names::linkEdit, section_names::stringTable) {}

uint32_t StringTableSection::addString(StringRef str) {
  uint32_t strx = size;
  strings.push_back(str); // TODO: consider deduplicating strings
  size += str.size() + 1; // account for null terminator
  return strx;
}

void StringTableSection::writeTo(uint8_t *buf) const {
  uint32_t off = 0;
  for (StringRef str : strings) {
    memcpy(buf + off, str.data(), str.size());
    off += str.size() + 1; // account for null terminator
  }
}

static_assert((CodeSignatureSection::blobHeadersSize % 8) == 0);
static_assert((CodeSignatureSection::fixedHeadersSize % 8) == 0);

CodeSignatureSection::CodeSignatureSection()
    : LinkEditSection(segment_names::linkEdit, section_names::codeSignature) {
  align = 16; // required by libstuff

  // XXX: This mimics LD64, where it uses the install-name as codesign
  // identifier, if available.
  if (!config->installName.empty())
    fileName = config->installName;
  else
    // FIXME: Consider using finalOutput instead of outputFile.
    fileName = config->outputFile;

  size_t slashIndex = fileName.rfind("/");
  if (slashIndex != std::string::npos)
    fileName = fileName.drop_front(slashIndex + 1);

  // NOTE: Any changes to these calculations should be repeated
  // in llvm-objcopy's MachOLayoutBuilder::layoutTail.
  allHeadersSize = alignTo<16>(fixedHeadersSize + fileName.size() + 1);
  fileNamePad = allHeadersSize - fixedHeadersSize - fileName.size();
}

uint32_t CodeSignatureSection::getBlockCount() const {
  return (fileOff + blockSize - 1) / blockSize;
}

uint64_t CodeSignatureSection::getRawSize() const {
  return allHeadersSize + getBlockCount() * hashSize;
}

void CodeSignatureSection::writeHashes(uint8_t *buf) const {
  // NOTE: Changes to this functionality should be repeated in llvm-objcopy's
  // MachOWriter::writeSignatureData.
  uint8_t *hashes = buf + fileOff + allHeadersSize;
  parallelFor(0, getBlockCount(), [&](size_t i) {
    sha256(buf + i * blockSize,
           std::min(static_cast<size_t>(fileOff - i * blockSize), blockSize),
           hashes + i * hashSize);
  });
#if defined(__APPLE__)
  // This is a macOS-specific work-around and makes no sense for any
  // other host OS. See https://openradar.appspot.com/FB8914231
  //
  // The macOS kernel maintains a signature-verification cache to
  // quickly validate applications at time of execve(2). The trouble
  // is that the kernel creates the cache entry at the time of the
  // mmap(2) call, before we have a chance to write either the code to
  // sign or the signature header+hashes. The fix is to invalidate
  // all cached data associated with the output file, thus discarding
  // the bogus prematurely-cached signature.
  msync(buf, fileOff + getSize(), MS_INVALIDATE);
#endif
}

void CodeSignatureSection::writeTo(uint8_t *buf) const {
  // NOTE: Changes to this functionality should be repeated in llvm-objcopy's
  // MachOWriter::writeSignatureData.
  uint32_t signatureSize = static_cast<uint32_t>(getSize());
  auto *superBlob = reinterpret_cast<CS_SuperBlob *>(buf);
  write32be(&superBlob->magic, CSMAGIC_EMBEDDED_SIGNATURE);
  write32be(&superBlob->length, signatureSize);
  write32be(&superBlob->count, 1);
  auto *blobIndex = reinterpret_cast<CS_BlobIndex *>(&superBlob[1]);
  write32be(&blobIndex->type, CSSLOT_CODEDIRECTORY);
  write32be(&blobIndex->offset, blobHeadersSize);
  auto *codeDirectory =
      reinterpret_cast<CS_CodeDirectory *>(buf + blobHeadersSize);
  write32be(&codeDirectory->magic, CSMAGIC_CODEDIRECTORY);
  write32be(&codeDirectory->length, signatureSize - blobHeadersSize);
  write32be(&codeDirectory->version, CS_SUPPORTSEXECSEG);
  write32be(&codeDirectory->flags, CS_ADHOC | CS_LINKER_SIGNED);
  write32be(&codeDirectory->hashOffset,
            sizeof(CS_CodeDirectory) + fileName.size() + fileNamePad);
  write32be(&codeDirectory->identOffset, sizeof(CS_CodeDirectory));
  codeDirectory->nSpecialSlots = 0;
  write32be(&codeDirectory->nCodeSlots, getBlockCount());
  write32be(&codeDirectory->codeLimit, fileOff);
  codeDirectory->hashSize = static_cast<uint8_t>(hashSize);
  codeDirectory->hashType = kSecCodeSignatureHashSHA256;
  codeDirectory->platform = 0;
  codeDirectory->pageSize = blockSizeShift;
  codeDirectory->spare2 = 0;
  codeDirectory->scatterOffset = 0;
  codeDirectory->teamOffset = 0;
  codeDirectory->spare3 = 0;
  codeDirectory->codeLimit64 = 0;
  OutputSegment *textSeg = getOrCreateOutputSegment(segment_names::text);
  write64be(&codeDirectory->execSegBase, textSeg->fileOff);
  write64be(&codeDirectory->execSegLimit, textSeg->fileSize);
  write64be(&codeDirectory->execSegFlags,
            config->outputType == MH_EXECUTE ? CS_EXECSEG_MAIN_BINARY : 0);
  auto *id = reinterpret_cast<char *>(&codeDirectory[1]);
  memcpy(id, fileName.begin(), fileName.size());
  memset(id + fileName.size(), 0, fileNamePad);
}

CStringSection::CStringSection(const char *name)
    : SyntheticSection(segment_names::text, name) {
  flags = S_CSTRING_LITERALS;
}

void CStringSection::addInput(CStringInputSection *isec) {
  isec->parent = this;
  inputs.push_back(isec);
  if (isec->align > align)
    align = isec->align;
}

void CStringSection::writeTo(uint8_t *buf) const {
  for (const CStringInputSection *isec : inputs) {
    for (const auto &[i, piece] : llvm::enumerate(isec->pieces)) {
      if (!piece.live)
        continue;
      StringRef string = isec->getStringRef(i);
      memcpy(buf + piece.outSecOff, string.data(), string.size());
    }
  }
}

void CStringSection::finalizeContents() {
  uint64_t offset = 0;
  for (CStringInputSection *isec : inputs) {
    for (const auto &[i, piece] : llvm::enumerate(isec->pieces)) {
      if (!piece.live)
        continue;
      // See comment above DeduplicatedCStringSection for how alignment is
      // handled.
      uint32_t pieceAlign = 1
                            << llvm::countr_zero(isec->align | piece.inSecOff);
      offset = alignToPowerOf2(offset, pieceAlign);
      piece.outSecOff = offset;
      isec->isFinal = true;
      StringRef string = isec->getStringRef(i);
      offset += string.size() + 1; // account for null terminator
    }
  }
  size = offset;
}

// Mergeable cstring literals are found under the __TEXT,__cstring section. In
// contrast to ELF, which puts strings that need different alignments into
// different sections, clang's Mach-O backend puts them all in one section.
// Strings that need to be aligned have the .p2align directive emitted before
// them, which simply translates into zero padding in the object file. In
// other words, we have to infer the desired alignment of these cstrings from
// their addresses.
//
// We differ slightly from ld64 in how we've chosen to align these cstrings.
// Both LLD and ld64 preserve the number of trailing zeros in each cstring's
// address in the input object files. When deduplicating identical cstrings,
// both linkers pick the cstring whose address has more trailing zeros, and
// preserve the alignment of that address in the final binary. However, ld64
// goes a step further and also preserves the offset of the cstring from the
// last section-aligned address. I.e. if a cstring is at offset 18 in the
// input, with a section alignment of 16, then both LLD and ld64 will ensure
// the final address is 2-byte aligned (since 18 == 16 + 2). But ld64 will
// also ensure that the final address is of the form 16 * k + 2 for some k.
//
// Note that ld64's heuristic means that a dedup'ed cstring's final address is
// dependent on the order of the input object files. E.g. if in addition to
// the cstring at offset 18 above, we have a duplicate one in another file
// with a `.cstring` section alignment of 2 and an offset of zero, then ld64
// will pick the cstring from the object file earlier on the command line
// (since both have the same number of trailing zeros in their address). So
// the final cstring may either be at some address `16 * k + 2` or at some
// address `2 * k`.
//
// I've opted not to follow this behavior primarily for implementation
// simplicity, and secondarily to save a few more bytes.
// It's not clear to me that preserving the section alignment + offset is ever
// necessary, and there are many cases that are clearly redundant. In
// particular, if an x86_64 object file contains some strings that are
// accessed via SIMD instructions, then the .cstring section in the object
// file will be 16-byte-aligned (since SIMD requires its operand addresses to
// be 16-byte aligned). However, there will typically also be other cstrings
// in the same file that aren't used via SIMD and don't need this alignment.
// They will be emitted at some arbitrary address `A`, but ld64 will treat
// them as being 16-byte aligned with an offset of `A % 16`.
void DeduplicatedCStringSection::finalizeContents() {
  // Find the largest alignment required for each string.
  for (const CStringInputSection *isec : inputs) {
    for (const auto &[i, piece] : llvm::enumerate(isec->pieces)) {
      if (!piece.live)
        continue;
      auto s = isec->getCachedHashStringRef(i);
      assert(isec->align != 0);
      uint8_t trailingZeros = llvm::countr_zero(isec->align | piece.inSecOff);
      auto it = stringOffsetMap.insert(
          std::make_pair(s, StringOffset(trailingZeros)));
      if (!it.second && it.first->second.trailingZeros < trailingZeros)
        it.first->second.trailingZeros = trailingZeros;
    }
  }

  // Assign an offset for each string and save it to the corresponding
  // StringPieces for easy access.
  for (CStringInputSection *isec : inputs) {
    for (const auto &[i, piece] : llvm::enumerate(isec->pieces)) {
      if (!piece.live)
        continue;
      auto s = isec->getCachedHashStringRef(i);
      auto it = stringOffsetMap.find(s);
      assert(it != stringOffsetMap.end());
      StringOffset &offsetInfo = it->second;
      if (offsetInfo.outSecOff == UINT64_MAX) {
        offsetInfo.outSecOff =
            alignToPowerOf2(size, 1ULL << offsetInfo.trailingZeros);
        size =
            offsetInfo.outSecOff + s.size() + 1; // account for null terminator
      }
      piece.outSecOff = offsetInfo.outSecOff;
    }
    isec->isFinal = true;
  }
}

void DeduplicatedCStringSection::writeTo(uint8_t *buf) const {
  for (const auto &p : stringOffsetMap) {
    StringRef data = p.first.val();
    uint64_t off = p.second.outSecOff;
    if (!data.empty())
      memcpy(buf + off, data.data(), data.size());
  }
}

DeduplicatedCStringSection::StringOffset
DeduplicatedCStringSection::getStringOffset(StringRef str) const {
  // StringPiece uses 31 bits to store the hashes, so we replicate that.
  uint32_t hash = xxh3_64bits(str) & 0x7fffffff;
  auto offset = stringOffsetMap.find(CachedHashStringRef(str, hash));
  assert(offset != stringOffsetMap.end() &&
         "Looked-up strings should always exist in section");
  return offset->second;
}

// This section is actually emitted as __TEXT,__const by ld64, but clang may
// emit input sections of that name, and LLD doesn't currently support mixing
// synthetic and concat-type OutputSections.
// To work around this, I've given our merged-literals section a different
// name.
WordLiteralSection::WordLiteralSection()
    : SyntheticSection(segment_names::text, section_names::literals) {
  align = 16;
}

void WordLiteralSection::addInput(WordLiteralInputSection *isec) {
  isec->parent = this;
  inputs.push_back(isec);
}

void WordLiteralSection::finalizeContents() {
  for (WordLiteralInputSection *isec : inputs) {
    // We do all processing of the InputSection here, so it will be effectively
    // finalized.
    isec->isFinal = true;
    const uint8_t *buf = isec->data.data();
    switch (sectionType(isec->getFlags())) {
    case S_4BYTE_LITERALS: {
      for (size_t off = 0, e = isec->data.size(); off < e; off += 4) {
        if (!isec->isLive(off))
          continue;
        uint32_t value = *reinterpret_cast<const uint32_t *>(buf + off);
        literal4Map.emplace(value, literal4Map.size());
      }
      break;
    }
    case S_8BYTE_LITERALS: {
      for (size_t off = 0, e = isec->data.size(); off < e; off += 8) {
        if (!isec->isLive(off))
          continue;
        uint64_t value = *reinterpret_cast<const uint64_t *>(buf + off);
        literal8Map.emplace(value, literal8Map.size());
      }
      break;
    }
    case S_16BYTE_LITERALS: {
      for (size_t off = 0, e = isec->data.size(); off < e; off += 16) {
        if (!isec->isLive(off))
          continue;
        UInt128 value = *reinterpret_cast<const UInt128 *>(buf + off);
        literal16Map.emplace(value, literal16Map.size());
      }
      break;
    }
    default:
      llvm_unreachable("invalid literal section type");
    }
  }
}

void WordLiteralSection::writeTo(uint8_t *buf) const {
  // Note that we don't attempt to do any endianness conversion in addInput(),
  // so we don't do it here either -- just write out the original value,
  // byte-for-byte.
  for (const auto &p : literal16Map)
    memcpy(buf + p.second * 16, &p.first, 16);
  buf += literal16Map.size() * 16;

  for (const auto &p : literal8Map)
    memcpy(buf + p.second * 8, &p.first, 8);
  buf += literal8Map.size() * 8;

  for (const auto &p : literal4Map)
    memcpy(buf + p.second * 4, &p.first, 4);
}

ObjCImageInfoSection::ObjCImageInfoSection()
    : SyntheticSection(segment_names::data, section_names::objCImageInfo) {}

ObjCImageInfoSection::ImageInfo
ObjCImageInfoSection::parseImageInfo(const InputFile *file) {
  ImageInfo info;
  ArrayRef<uint8_t> data = file->objCImageInfo;
  // The image info struct has the following layout:
  // struct {
  //   uint32_t version;
  //   uint32_t flags;
  // };
  if (data.size() < 8) {
    warn(toString(file) + ": invalid __objc_imageinfo size");
    return info;
  }

  auto *buf = reinterpret_cast<const uint32_t *>(data.data());
  if (read32le(buf) != 0) {
    warn(toString(file) + ": invalid __objc_imageinfo version");
    return info;
  }

  uint32_t flags = read32le(buf + 1);
  info.swiftVersion = (flags >> 8) & 0xff;
  info.hasCategoryClassProperties = flags & 0x40;
  return info;
}

static std::string swiftVersionString(uint8_t version) {
  switch (version) {
  case 1:
    return "1.0";
  case 2:
    return "1.1";
  case 3:
    return "2.0";
  case 4:
    return "3.0";
  case 5:
    return "4.0";
  default:
    return ("0x" + Twine::utohexstr(version)).str();
  }
}

// Validate each object file's __objc_imageinfo and use them to generate the
// image info for the output binary. Only two pieces of info are relevant:
// 1. The Swift version (should be identical across inputs)
// 2. `bool hasCategoryClassProperties` (true only if true for all inputs)
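// For reference, writeTo() below packs these back into the image-info flags
// word in the same layout parseImageInfo() decodes: bit 0x40 carries
// hasCategoryClassProperties and bits 8-15 carry the Swift version.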
void ObjCImageInfoSection::finalizeContents() {
  assert(files.size() != 0); // should have already been checked via isNeeded()

  info.hasCategoryClassProperties = true;
  const InputFile *firstFile;
  for (const InputFile *file : files) {
    ImageInfo inputInfo = parseImageInfo(file);
    info.hasCategoryClassProperties &= inputInfo.hasCategoryClassProperties;

    // swiftVersion 0 means no Swift is present, so no version checking required
    if (inputInfo.swiftVersion == 0)
      continue;

    if (info.swiftVersion != 0 && info.swiftVersion != inputInfo.swiftVersion) {
      error("Swift version mismatch: " + toString(firstFile) + " has version " +
            swiftVersionString(info.swiftVersion) + " but " + toString(file) +
            " has version " + swiftVersionString(inputInfo.swiftVersion));
    } else {
      info.swiftVersion = inputInfo.swiftVersion;
      firstFile = file;
    }
  }
}

void ObjCImageInfoSection::writeTo(uint8_t *buf) const {
  uint32_t flags = info.hasCategoryClassProperties ? 0x40 : 0x0;
  flags |= info.swiftVersion << 8;
  write32le(buf + 4, flags);
}

InitOffsetsSection::InitOffsetsSection()
    : SyntheticSection(segment_names::text, section_names::initOffsets) {
  flags = S_INIT_FUNC_OFFSETS;
  align = 4; // This section contains 32-bit integers.
}

uint64_t InitOffsetsSection::getSize() const {
  size_t count = 0;
  for (const ConcatInputSection *isec : sections)
    count += isec->relocs.size();
  return count * sizeof(uint32_t);
}

void InitOffsetsSection::writeTo(uint8_t *buf) const {
  // FIXME: Add function specified by -init when that argument is implemented.
  for (ConcatInputSection *isec : sections) {
    for (const Reloc &rel : isec->relocs) {
      const Symbol *referent = rel.referent.dyn_cast<Symbol *>();
      assert(referent && "section relocation should have been rejected");
      uint64_t offset = referent->getVA() - in.header->addr;
      // FIXME: Can we handle this gracefully?
      if (offset > UINT32_MAX)
        fatal(isec->getLocation(rel.offset) + ": offset to initializer " +
              referent->getName() + " (" + utohexstr(offset) +
              ") does not fit in 32 bits");

      // Entries need to be added in the order they appear in the section, but
      // relocations aren't guaranteed to be sorted.
      size_t index = rel.offset >> target->p2WordSize;
      write32le(&buf[index * sizeof(uint32_t)], offset);
    }
    buf += isec->relocs.size() * sizeof(uint32_t);
  }
}
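
// For illustration: each entry written above is the 32-bit offset of an
// initializer from the start of the image (the mach header). E.g., with the
// header at VA 0x100000000, an initializer at VA 0x100003f80 is emitted as
// 0x3f80. (The addresses here are examples, not values the linker guarantees.)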

// The inputs are __mod_init_func sections, which contain pointers to
// initializer functions, therefore all relocations should be of the UNSIGNED
// type. InitOffsetsSection stores offsets, so if the initializer's address is
// not known at link time, stub-indirection has to be used.
void InitOffsetsSection::setUp() {
  for (const ConcatInputSection *isec : sections) {
    for (const Reloc &rel : isec->relocs) {
      RelocAttrs attrs = target->getRelocAttrs(rel.type);
      if (!attrs.hasAttr(RelocAttrBits::UNSIGNED))
        error(isec->getLocation(rel.offset) +
              ": unsupported relocation type: " + attrs.name);
      if (rel.addend != 0)
        error(isec->getLocation(rel.offset) +
              ": relocation addend is not representable in __init_offsets");
      if (rel.referent.is<InputSection *>())
        error(isec->getLocation(rel.offset) +
              ": unexpected section relocation");

      Symbol *sym = rel.referent.dyn_cast<Symbol *>();
      if (auto *undefined = dyn_cast<Undefined>(sym))
        treatUndefinedSymbol(*undefined, isec, rel.offset);
      if (needsBinding(sym))
        in.stubs->addEntry(sym);
    }
  }
}

ObjCMethListSection::ObjCMethListSection()
    : SyntheticSection(segment_names::text, section_names::objcMethList) {
  flags = S_ATTR_NO_DEAD_STRIP;
  align = relativeOffsetSize;
}

// Go through all input method lists and ensure that we have selrefs for all
// their method names. The selrefs will be needed later by ::writeTo. We need
// to create them early on here to ensure they are processed correctly by the
// lld pipeline.
void ObjCMethListSection::setUp() {
  for (const ConcatInputSection *isec : inputs) {
    uint32_t structSizeAndFlags = 0, structCount = 0;
    readMethodListHeader(isec->data.data(), structSizeAndFlags, structCount);
    uint32_t originalStructSize = structSizeAndFlags & structSizeMask;
    // The method name is immediately after the header.
    uint32_t methodNameOff = methodListHeaderSize;

    // Loop through all methods, and ensure a selref for each of them exists.
    while (methodNameOff < isec->data.size()) {
      const Reloc *reloc = isec->getRelocAt(methodNameOff);
      assert(reloc && "Relocation expected at method list name slot");
      auto *def = dyn_cast_or_null<Defined>(reloc->referent.get<Symbol *>());
      assert(def && "Expected valid Defined at method list name slot");
      auto *cisec = cast<CStringInputSection>(def->isec());
      assert(cisec && "Expected method name to be in a CStringInputSection");
      auto methname = cisec->getStringRefAtOffset(def->value);
      if (!ObjCSelRefsHelper::getSelRef(methname))
        ObjCSelRefsHelper::makeSelRef(methname);

      // Jump to the method name offset in the next struct.
      methodNameOff += originalStructSize;
    }
  }
}

// Calculate the section size and the final offsets at which each InputSection
// needs to be written.
void ObjCMethListSection::finalize() {
  // sectionSize will be the total size of the __objc_methlist section.
  sectionSize = 0;
  for (ConcatInputSection *isec : inputs) {
    // We can also use sectionSize as the write offset for isec.
    assert(sectionSize == alignToPowerOf2(sectionSize, relativeOffsetSize) &&
           "expected __objc_methlist to be aligned by default with the "
           "required section alignment");
    isec->outSecOff = sectionSize;

    isec->isFinal = true;
    uint32_t relativeListSize =
        computeRelativeMethodListSize(isec->data.size());
    sectionSize += relativeListSize;

    // If encoding the method list in relative offset format shrinks the size,
    // then we also need to adjust symbol sizes to match the new size.
    // Note that on 32-bit platforms the size of the method list will remain
    // the same when encoded in relative offset format.
    if (relativeListSize != isec->data.size()) {
      for (Symbol *sym : isec->symbols) {
        assert(isa<Defined>(sym) &&
               "Unexpected undefined symbol in ObjC method list");
        auto *def = cast<Defined>(sym);
        // There can be 0-size symbols, check if this is the case and ignore
        // them.
        if (def->size) {
          assert(
              def->size == isec->data.size() &&
              "Invalid ObjC method list symbol size: expected symbol size to "
              "match isec size");
          def->size = relativeListSize;
        }
      }
    }
  }
}

void ObjCMethListSection::writeTo(uint8_t *bufStart) const {
  uint8_t *buf = bufStart;
  for (const ConcatInputSection *isec : inputs) {
    assert(buf - bufStart == long(isec->outSecOff) &&
           "Writing at unexpected offset");
    uint32_t writtenSize = writeRelativeMethodList(isec, buf);
    buf += writtenSize;
  }
  assert(buf - bufStart == sectionSize &&
         "Written size does not match expected section size");
}

// Check if an InputSection is a method list. To do this we scan the
// InputSection for any symbols whose names match the patterns we expect clang
// to generate for method lists.
bool ObjCMethListSection::isMethodList(const ConcatInputSection *isec) {
  const char *symPrefixes[] = {objc::symbol_names::classMethods,
                               objc::symbol_names::instanceMethods,
                               objc::symbol_names::categoryInstanceMethods,
                               objc::symbol_names::categoryClassMethods};
  if (!isec)
    return false;
  for (const Symbol *sym : isec->symbols) {
    auto *def = dyn_cast_or_null<Defined>(sym);
    if (!def)
      continue;
    for (const char *prefix : symPrefixes) {
      if (def->getName().starts_with(prefix)) {
        assert(def->size == isec->data.size() &&
               "Invalid ObjC method list symbol size: expected symbol size to "
               "match isec size");
        assert(def->value == 0 &&
               "Offset of ObjC method list symbol must be 0");
        return true;
      }
    }
  }

  return false;
}

// Encode a single relative offset value. The input is the data/symbol at
// (&isec->data[inSecOff]).
// The output is written to (&buf[outSecOff]).
// 'useSelRef' indicates that we should not directly use the specified symbol,
// but instead get the selRef for the symbol and use that instead.
void ObjCMethListSection::writeRelativeOffsetForIsec(
    const ConcatInputSection *isec, uint8_t *buf, uint32_t &inSecOff,
    uint32_t &outSecOff, bool useSelRef) const {
  const Reloc *reloc = isec->getRelocAt(inSecOff);
  assert(reloc && "Relocation expected at __objc_methlist Offset");
  auto *def = dyn_cast_or_null<Defined>(reloc->referent.get<Symbol *>());
  assert(def && "Expected all syms in __objc_methlist to be defined");
  uint32_t symVA = def->getVA();

  if (useSelRef) {
    auto *cisec = cast<CStringInputSection>(def->isec());
    auto methname = cisec->getStringRefAtOffset(def->value);
    ConcatInputSection *selRef = ObjCSelRefsHelper::getSelRef(methname);
    assert(selRef && "Expected all selector names to already be "
                     "present in __objc_selrefs");
    symVA = selRef->getVA();
    assert(selRef->data.size() == sizeof(target->wordSize) &&
           "Expected one selref per ConcatInputSection");
  }

  uint32_t currentVA = isec->getVA() + outSecOff;
  uint32_t delta = symVA - currentVA;
  write32le(buf + outSecOff, delta);

  // Move one pointer forward in the absolute method list.
  inSecOff += target->wordSize;
  // Move one relative offset forward in the relative method list (32 bits).
  outSecOff += relativeOffsetSize;
}

// Write a relative method list to buf, and return the size of the written
// data.
uint32_t
ObjCMethListSection::writeRelativeMethodList(const ConcatInputSection *isec,
                                             uint8_t *buf) const {
  // Copy over the header, and add the "this is a relative method list" magic
  // flag value.
  uint32_t structSizeAndFlags = 0, structCount = 0;
  readMethodListHeader(isec->data.data(), structSizeAndFlags, structCount);
  // Set the struct size for the relative method list.
  uint32_t relativeStructSizeAndFlags =
      (relativeOffsetSize * pointersPerStruct) & structSizeMask;
  // Carry over the old flags from the input struct.
  relativeStructSizeAndFlags |= structSizeAndFlags & structFlagsMask;
  // Set the relative method list flag.
  relativeStructSizeAndFlags |= relMethodHeaderFlag;

  writeMethodListHeader(buf, relativeStructSizeAndFlags, structCount);

  assert(methodListHeaderSize +
                 (structCount * pointersPerStruct * target->wordSize) ==
             isec->data.size() &&
         "Invalid computed ObjC method list size");

  uint32_t inSecOff = methodListHeaderSize;
  uint32_t outSecOff = methodListHeaderSize;

  // Go through the method list and encode input absolute pointers as relative
  // offsets. writeRelativeOffsetForIsec increments inSecOff and outSecOff.
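  // For illustration (assuming the two-uint32_t header read by
  // readMethodListHeader above): on a 64-bit target each absolute entry is
  // three pointers -- name, types, imp -- i.e. 24 bytes, while the relative
  // form packs the same entry into three 32-bit offsets (12 bytes), so a list
  // of N methods shrinks from 8 + 24 * N to 8 + 12 * N bytes.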
  for (uint32_t i = 0; i < structCount; i++) {
    // Write the name of the method.
    writeRelativeOffsetForIsec(isec, buf, inSecOff, outSecOff, true);
    // Write the type of the method.
    writeRelativeOffsetForIsec(isec, buf, inSecOff, outSecOff, false);
    // Write the implementation of the method.
    writeRelativeOffsetForIsec(isec, buf, inSecOff, outSecOff, false);
  }

  // Expecting to have read all the data in the isec.
  assert(inSecOff == isec->data.size() &&
         "Invalid actual ObjC method list size");
  assert(
      outSecOff == computeRelativeMethodListSize(inSecOff) &&
      "Mismatch between input & output size when writing relative method list");
  return outSecOff;
}

// Given the size of an ObjC method list InputSection, return the size of the
// method list when encoded in relative offsets format. We can do this without
// decoding the actual data, as it can be directly inferred from the size of
// the isec.
uint32_t ObjCMethListSection::computeRelativeMethodListSize(
    uint32_t absoluteMethodListSize) const {
  uint32_t oldPointersSize = absoluteMethodListSize - methodListHeaderSize;
  uint32_t pointerCount = oldPointersSize / target->wordSize;
  assert(((pointerCount % pointersPerStruct) == 0) &&
         "__objc_methlist expects method lists to have multiple-of-3 pointers");

  uint32_t newPointersSize = pointerCount * relativeOffsetSize;
  uint32_t newTotalSize = methodListHeaderSize + newPointersSize;

  assert((newTotalSize <= absoluteMethodListSize) &&
         "Expected relative method list size to be less than or equal to the "
         "original size");
  return newTotalSize;
}

// Read a method list header from buf.
void ObjCMethListSection::readMethodListHeader(const uint8_t *buf,
                                               uint32_t &structSizeAndFlags,
                                               uint32_t &structCount) const {
  structSizeAndFlags = read32le(buf);
  structCount = read32le(buf + sizeof(uint32_t));
}

// Write a method list header to buf.
void ObjCMethListSection::writeMethodListHeader(uint8_t *buf,
                                                uint32_t structSizeAndFlags,
                                                uint32_t structCount) const {
  write32le(buf, structSizeAndFlags);
  write32le(buf + sizeof(structSizeAndFlags), structCount);
}

void macho::createSyntheticSymbols() {
  auto addHeaderSymbol = [](const char *name) {
    symtab->addSynthetic(name, in.header->isec, /*value=*/0,
                         /*isPrivateExtern=*/true, /*includeInSymtab=*/false,
                         /*referencedDynamically=*/false);
  };

  switch (config->outputType) {
  // FIXME: Assign the right address value for these symbols
  // (rather than 0). But we need to do that after assignAddresses().
  case MH_EXECUTE:
    // If linking PIE, __mh_execute_header is a defined symbol in
    // __TEXT,__text. Otherwise, it's an absolute symbol.
    if (config->isPic)
      symtab->addSynthetic("__mh_execute_header", in.header->isec, /*value=*/0,
                           /*isPrivateExtern=*/false, /*includeInSymtab=*/true,
                           /*referencedDynamically=*/true);
    else
      symtab->addSynthetic("__mh_execute_header", /*isec=*/nullptr, /*value=*/0,
                           /*isPrivateExtern=*/false, /*includeInSymtab=*/true,
                           /*referencedDynamically=*/true);
    break;

  // The following symbols are N_SECT symbols, even though the header is not
  // part of any section and they are private to the bundle/dylib/object they
  // are part of.
  case MH_BUNDLE:
    addHeaderSymbol("__mh_bundle_header");
    break;
  case MH_DYLIB:
    addHeaderSymbol("__mh_dylib_header");
    break;
  case MH_DYLINKER:
    addHeaderSymbol("__mh_dylinker_header");
    break;
  case MH_OBJECT:
    addHeaderSymbol("__mh_object_header");
    break;
  default:
    llvm_unreachable("unexpected outputType");
    break;
  }

  // The Itanium C++ ABI requires dylibs to pass a pointer to __cxa_atexit
  // which does e.g. cleanup of static global variables. The ABI document
  // says that the pointer can point to any address in one of the dylib's
  // segments, but in practice ld64 seems to set it to point to the header,
  // so that's what's implemented here.
  addHeaderSymbol("___dso_handle");
}

ChainedFixupsSection::ChainedFixupsSection()
    : LinkEditSection(segment_names::linkEdit, section_names::chainFixups) {}

bool ChainedFixupsSection::isNeeded() const {
  assert(config->emitChainedFixups);
  // dyld always expects LC_DYLD_CHAINED_FIXUPS to point to a valid
  // dyld_chained_fixups_header, so we create this section even if there
  // aren't any fixups.
  return true;
}

void ChainedFixupsSection::addBinding(const Symbol *sym,
                                      const InputSection *isec, uint64_t offset,
                                      int64_t addend) {
  locations.emplace_back(isec, offset);
  int64_t outlineAddend = (addend < 0 || addend > 0xFF) ? addend : 0;
  auto [it, inserted] = bindings.insert(
      {{sym, outlineAddend}, static_cast<uint32_t>(bindings.size())});

  if (inserted) {
    symtabSize += sym->getName().size() + 1;
    hasWeakBind = hasWeakBind || needsWeakBind(*sym);
    if (!isInt<23>(outlineAddend))
      needsLargeAddend = true;
    else if (outlineAddend != 0)
      needsAddend = true;
  }
}
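
// Note on addends: values in [0, 0xFF] can be encoded inline in the chained
// pointer itself, so only addends outside that range are "outlined" into the
// import-table entry; finalizeContents() then picks one of the larger
// DYLD_CHAINED_IMPORT_ADDEND* formats when such addends exist.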

std::pair<uint32_t, uint8_t>
ChainedFixupsSection::getBinding(const Symbol *sym, int64_t addend) const {
  int64_t outlineAddend = (addend < 0 || addend > 0xFF) ? addend : 0;
  auto it = bindings.find({sym, outlineAddend});
  assert(it != bindings.end() && "binding not found in the imports table");
  if (outlineAddend == 0)
    return {it->second, addend};
  return {it->second, 0};
}

static size_t writeImport(uint8_t *buf, int format, int16_t libOrdinal,
                          bool weakRef, uint32_t nameOffset, int64_t addend) {
  switch (format) {
  case DYLD_CHAINED_IMPORT: {
    auto *import = reinterpret_cast<dyld_chained_import *>(buf);
    import->lib_ordinal = libOrdinal;
    import->weak_import = weakRef;
    import->name_offset = nameOffset;
    return sizeof(dyld_chained_import);
  }
  case DYLD_CHAINED_IMPORT_ADDEND: {
    auto *import = reinterpret_cast<dyld_chained_import_addend *>(buf);
    import->lib_ordinal = libOrdinal;
    import->weak_import = weakRef;
    import->name_offset = nameOffset;
    import->addend = addend;
    return sizeof(dyld_chained_import_addend);
  }
  case DYLD_CHAINED_IMPORT_ADDEND64: {
    auto *import = reinterpret_cast<dyld_chained_import_addend64 *>(buf);
    import->lib_ordinal = libOrdinal;
    import->weak_import = weakRef;
    import->name_offset = nameOffset;
    import->addend = addend;
    return sizeof(dyld_chained_import_addend64);
  }
  default:
    llvm_unreachable("Unknown import format");
  }
}

size_t ChainedFixupsSection::SegmentInfo::getSize() const {
  assert(pageStarts.size() > 0 && "SegmentInfo for segment with no fixups?");
  return alignTo<8>(sizeof(dyld_chained_starts_in_segment) +
                    pageStarts.back().first * sizeof(uint16_t));
}

size_t ChainedFixupsSection::SegmentInfo::writeTo(uint8_t *buf) const {
  auto *segInfo = reinterpret_cast<dyld_chained_starts_in_segment *>(buf);
  segInfo->size = getSize();
  segInfo->page_size = target->getPageSize();
  // FIXME: Use DYLD_CHAINED_PTR_64_OFFSET on newer OS versions.
  segInfo->pointer_format = DYLD_CHAINED_PTR_64;
  segInfo->segment_offset = oseg->addr - in.header->addr;
  segInfo->max_valid_pointer = 0; // not used on 64-bit
  segInfo->page_count = pageStarts.back().first + 1;

  uint16_t *starts = segInfo->page_start;
  for (size_t i = 0; i < segInfo->page_count; ++i)
    starts[i] = DYLD_CHAINED_PTR_START_NONE;

  for (auto [pageIdx, startAddr] : pageStarts)
    starts[pageIdx] = startAddr;
  return segInfo->size;
}

static size_t importEntrySize(int format) {
  switch (format) {
  case DYLD_CHAINED_IMPORT:
    return sizeof(dyld_chained_import);
  case DYLD_CHAINED_IMPORT_ADDEND:
    return sizeof(dyld_chained_import_addend);
  case DYLD_CHAINED_IMPORT_ADDEND64:
    return sizeof(dyld_chained_import_addend64);
  default:
    llvm_unreachable("Unknown import format");
  }
}

// This is step 3 of the algorithm described in the class comment of
// ChainedFixupsSection.
//
// LC_DYLD_CHAINED_FIXUPS data consists of (in this order):
// * A dyld_chained_fixups_header
// * A dyld_chained_starts_in_image
// * One dyld_chained_starts_in_segment per segment
// * List of all imports (dyld_chained_import, dyld_chained_import_addend, or
//   dyld_chained_import_addend64)
// * Names of imported symbols
void ChainedFixupsSection::writeTo(uint8_t *buf) const {
  auto *header = reinterpret_cast<dyld_chained_fixups_header *>(buf);
  header->fixups_version = 0;
  header->imports_count = bindings.size();
  header->imports_format = importFormat;
  header->symbols_format = 0;

  buf += alignTo<8>(sizeof(*header));

  auto curOffset = [&buf, &header]() -> uint32_t {
    return buf - reinterpret_cast<uint8_t *>(header);
  };

  header->starts_offset = curOffset();

  auto *imageInfo = reinterpret_cast<dyld_chained_starts_in_image *>(buf);
  imageInfo->seg_count = outputSegments.size();
  uint32_t *segStarts = imageInfo->seg_info_offset;

  // dyld_chained_starts_in_image ends in a flexible array member containing a
  // uint32_t for each segment. Leave room for it, and fill it via segStarts.
  buf += alignTo<8>(offsetof(dyld_chained_starts_in_image, seg_info_offset) +
                    outputSegments.size() * sizeof(uint32_t));

  // Initialize all offsets to 0, which indicates that the segment does not
  // have fixups. Those that do have them will be filled in below.
  for (size_t i = 0; i < outputSegments.size(); ++i)
    segStarts[i] = 0;

  for (const SegmentInfo &seg : fixupSegments) {
    segStarts[seg.oseg->index] = curOffset() - header->starts_offset;
    buf += seg.writeTo(buf);
  }

  // Write the imports table.
  header->imports_offset = curOffset();
  uint64_t nameOffset = 0;
  for (auto [import, idx] : bindings) {
    const Symbol &sym = *import.first;
    buf += writeImport(buf, importFormat, ordinalForSymbol(sym),
                       sym.isWeakRef(), nameOffset, import.second);
    nameOffset += sym.getName().size() + 1;
  }

  // Write the imported symbol names.
  header->symbols_offset = curOffset();
  for (auto [import, idx] : bindings) {
    StringRef name = import.first->getName();
    memcpy(buf, name.data(), name.size());
    buf += name.size() + 1; // account for null terminator
  }

  assert(curOffset() == getRawSize());
}

// This is step 2 of the algorithm described in the class comment of
// ChainedFixupsSection.
void ChainedFixupsSection::finalizeContents() {
  assert(target->wordSize == 8 && "Only 64-bit platforms are supported");
  assert(config->emitChainedFixups);

  if (!isUInt<32>(symtabSize))
    error("cannot encode chained fixups: imported symbols table size " +
          Twine(symtabSize) + " exceeds 4 GiB");

  bool needsLargeOrdinal = any_of(bindings, [](const auto &p) {
    // 0xF1 - 0xFF are reserved for special ordinals in the 8-bit encoding.
    return ordinalForSymbol(*p.first.first) > 0xF0;
  });

  if (needsLargeAddend || !isUInt<23>(symtabSize) || needsLargeOrdinal)
    importFormat = DYLD_CHAINED_IMPORT_ADDEND64;
  else if (needsAddend)
    importFormat = DYLD_CHAINED_IMPORT_ADDEND;
  else
    importFormat = DYLD_CHAINED_IMPORT;

  for (Location &loc : locations)
    loc.offset =
        loc.isec->parent->getSegmentOffset() + loc.isec->getOffset(loc.offset);

  llvm::sort(locations, [](const Location &a, const Location &b) {
    const OutputSegment *segA = a.isec->parent->parent;
    const OutputSegment *segB = b.isec->parent->parent;
    if (segA == segB)
      return a.offset < b.offset;
    return segA->addr < segB->addr;
  });

  auto sameSegment = [](const Location &a, const Location &b) {
    return a.isec->parent->parent == b.isec->parent->parent;
  };

  const uint64_t pageSize = target->getPageSize();
  for (size_t i = 0, count = locations.size(); i < count;) {
    const Location &firstLoc = locations[i];
    fixupSegments.emplace_back(firstLoc.isec->parent->parent);
    while (i < count && sameSegment(locations[i], firstLoc)) {
      uint32_t pageIdx = locations[i].offset / pageSize;
      fixupSegments.back().pageStarts.emplace_back(
          pageIdx, locations[i].offset % pageSize);
      ++i;
      while (i < count && sameSegment(locations[i], firstLoc) &&
             locations[i].offset / pageSize ==
                 pageIdx)
        ++i;
    }
  }

  // Compute expected encoded size.
  size = alignTo<8>(sizeof(dyld_chained_fixups_header));
  size += alignTo<8>(offsetof(dyld_chained_starts_in_image, seg_info_offset) +
                     outputSegments.size() * sizeof(uint32_t));
  for (const SegmentInfo &seg : fixupSegments)
    size += seg.getSize();
  size += importEntrySize(importFormat) * bindings.size();
  size += symtabSize;
}

template SymtabSection *macho::makeSymtabSection<LP64>(StringTableSection &);
template SymtabSection *macho::makeSymtabSection<ILP32>(StringTableSection &);