Path: blob/main/contrib/llvm-project/lld/ELF/Arch/AArch64.cpp
//===- AArch64.cpp --------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "InputFiles.h"
#include "OutputSections.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "lld/Common/ErrorHandler.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/Support/Endian.h"

using namespace llvm;
using namespace llvm::support::endian;
using namespace llvm::ELF;
using namespace lld;
using namespace lld::elf;

// Page(Expr) is the page address of the expression Expr, defined
// as (Expr & ~0xFFF). (This applies even if the machine page size
// supported by the platform has a different value.)
uint64_t elf::getAArch64Page(uint64_t expr) {
  return expr & ~static_cast<uint64_t>(0xFFF);
}

namespace {
class AArch64 : public TargetInfo {
public:
  AArch64();
  RelExpr getRelExpr(RelType type, const Symbol &s,
                     const uint8_t *loc) const override;
  RelType getDynRel(RelType type) const override;
  int64_t getImplicitAddend(const uint8_t *buf, RelType type) const override;
  void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
  void writeIgotPlt(uint8_t *buf, const Symbol &s) const override;
  void writePltHeader(uint8_t *buf) const override;
  void writePlt(uint8_t *buf, const Symbol &sym,
                uint64_t pltEntryAddr) const override;
  bool needsThunk(RelExpr expr, RelType type, const InputFile *file,
                  uint64_t branchAddr, const Symbol &s,
                  int64_t a) const override;
  uint32_t getThunkSectionSpacing() const override;
  bool inBranchRange(RelType type, uint64_t src, uint64_t dst) const override;
  bool usesOnlyLowPageBits(RelType type) const override;
  void relocate(uint8_t *loc, const Relocation &rel,
                uint64_t val) const override;
  RelExpr adjustTlsExpr(RelType type, RelExpr expr) const override;
  void relocateAlloc(InputSectionBase &sec, uint8_t *buf) const override;

private:
  void relaxTlsGdToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
  void relaxTlsGdToIe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
  void relaxTlsIeToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
};

struct AArch64Relaxer {
  bool safeToRelaxAdrpLdr = false;

  AArch64Relaxer(ArrayRef<Relocation> relocs);
  bool tryRelaxAdrpAdd(const Relocation &adrpRel, const Relocation &addRel,
                       uint64_t secAddr, uint8_t *buf) const;
  bool tryRelaxAdrpLdr(const Relocation &adrpRel, const Relocation &ldrRel,
                       uint64_t secAddr, uint8_t *buf) const;
};
} // namespace

// Return the bits [Start, End] from Val shifted Start bits.
// For instance, getBits(0xF0, 4, 8) returns 0xF.
static uint64_t getBits(uint64_t val, int start, int end) {
  uint64_t mask = ((uint64_t)1 << (end + 1 - start)) - 1;
  return (val >> start) & mask;
}
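
// Illustrative example (editor's note, not part of the upstream file): for an
// address such as 0x12345ABC, getAArch64Page yields 0x12345000 and the low 12
// bits 0xABC remain as the page offset, so
//   getBits(0x12345ABC, 0, 11)  == 0xABC
//   getBits(0x12345ABC, 12, 31) == 0x12345
// which is exactly the HI21/LO12 split used by the ADRP/ADD pairs below.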

AArch64::AArch64() {
  copyRel = R_AARCH64_COPY;
  relativeRel = R_AARCH64_RELATIVE;
  iRelativeRel = R_AARCH64_IRELATIVE;
  gotRel = R_AARCH64_GLOB_DAT;
  pltRel = R_AARCH64_JUMP_SLOT;
  symbolicRel = R_AARCH64_ABS64;
  tlsDescRel = R_AARCH64_TLSDESC;
  tlsGotRel = R_AARCH64_TLS_TPREL64;
  pltHeaderSize = 32;
  pltEntrySize = 16;
  ipltEntrySize = 16;
  defaultMaxPageSize = 65536;

  // Align to the 2 MiB page size (known as a superpage or huge page).
  // FreeBSD automatically promotes 2 MiB-aligned allocations.
  defaultImageBase = 0x200000;

  needsThunks = true;
}

RelExpr AArch64::getRelExpr(RelType type, const Symbol &s,
                            const uint8_t *loc) const {
  switch (type) {
  case R_AARCH64_ABS16:
  case R_AARCH64_ABS32:
  case R_AARCH64_ABS64:
  case R_AARCH64_ADD_ABS_LO12_NC:
  case R_AARCH64_LDST128_ABS_LO12_NC:
  case R_AARCH64_LDST16_ABS_LO12_NC:
  case R_AARCH64_LDST32_ABS_LO12_NC:
  case R_AARCH64_LDST64_ABS_LO12_NC:
  case R_AARCH64_LDST8_ABS_LO12_NC:
  case R_AARCH64_MOVW_SABS_G0:
  case R_AARCH64_MOVW_SABS_G1:
  case R_AARCH64_MOVW_SABS_G2:
  case R_AARCH64_MOVW_UABS_G0:
  case R_AARCH64_MOVW_UABS_G0_NC:
  case R_AARCH64_MOVW_UABS_G1:
  case R_AARCH64_MOVW_UABS_G1_NC:
  case R_AARCH64_MOVW_UABS_G2:
  case R_AARCH64_MOVW_UABS_G2_NC:
  case R_AARCH64_MOVW_UABS_G3:
    return R_ABS;
  case R_AARCH64_AUTH_ABS64:
    return R_AARCH64_AUTH;
  case R_AARCH64_TLSDESC_ADR_PAGE21:
    return R_AARCH64_TLSDESC_PAGE;
  case R_AARCH64_TLSDESC_LD64_LO12:
  case R_AARCH64_TLSDESC_ADD_LO12:
    return R_TLSDESC;
  case R_AARCH64_TLSDESC_CALL:
    return R_TLSDESC_CALL;
  case R_AARCH64_TLSLE_ADD_TPREL_HI12:
  case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
  case R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
  case R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
  case R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
  case R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
  case R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC:
  case R_AARCH64_TLSLE_MOVW_TPREL_G0:
  case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
  case R_AARCH64_TLSLE_MOVW_TPREL_G1:
  case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
  case R_AARCH64_TLSLE_MOVW_TPREL_G2:
    return R_TPREL;
  case R_AARCH64_CALL26:
  case R_AARCH64_CONDBR19:
  case R_AARCH64_JUMP26:
  case R_AARCH64_TSTBR14:
    return R_PLT_PC;
  case R_AARCH64_PLT32:
    const_cast<Symbol &>(s).thunkAccessed = true;
    return R_PLT_PC;
  case R_AARCH64_PREL16:
  case R_AARCH64_PREL32:
  case R_AARCH64_PREL64:
  case R_AARCH64_ADR_PREL_LO21:
  case R_AARCH64_LD_PREL_LO19:
  case R_AARCH64_MOVW_PREL_G0:
  case R_AARCH64_MOVW_PREL_G0_NC:
  case R_AARCH64_MOVW_PREL_G1:
  case R_AARCH64_MOVW_PREL_G1_NC:
  case R_AARCH64_MOVW_PREL_G2:
  case R_AARCH64_MOVW_PREL_G2_NC:
  case R_AARCH64_MOVW_PREL_G3:
    return R_PC;
  case R_AARCH64_ADR_PREL_PG_HI21:
  case R_AARCH64_ADR_PREL_PG_HI21_NC:
    return R_AARCH64_PAGE_PC;
  case R_AARCH64_LD64_GOT_LO12_NC:
  case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    return R_GOT;
  case R_AARCH64_LD64_GOTPAGE_LO15:
    return R_AARCH64_GOT_PAGE;
  case R_AARCH64_ADR_GOT_PAGE:
  case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    return R_AARCH64_GOT_PAGE_PC;
  case R_AARCH64_GOTPCREL32:
  case R_AARCH64_GOT_LD_PREL19:
    return R_GOT_PC;
  case R_AARCH64_NONE:
    return R_NONE;
  default:
    error(getErrorLocation(loc) + "unknown relocation (" + Twine(type) +
          ") against symbol " + toString(s));
    return R_NONE;
  }
}

RelExpr AArch64::adjustTlsExpr(RelType type, RelExpr expr) const {
  if (expr == R_RELAX_TLS_GD_TO_IE) {
    if (type == R_AARCH64_TLSDESC_ADR_PAGE21)
      return R_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC;
    return R_RELAX_TLS_GD_TO_IE_ABS;
  }
  return expr;
}

bool AArch64::usesOnlyLowPageBits(RelType type) const {
  switch (type) {
  default:
    return false;
  case R_AARCH64_ADD_ABS_LO12_NC:
  case R_AARCH64_LD64_GOT_LO12_NC:
  case R_AARCH64_LDST128_ABS_LO12_NC:
  case R_AARCH64_LDST16_ABS_LO12_NC:
  case R_AARCH64_LDST32_ABS_LO12_NC:
  case R_AARCH64_LDST64_ABS_LO12_NC:
  case R_AARCH64_LDST8_ABS_LO12_NC:
  case R_AARCH64_TLSDESC_ADD_LO12:
  case R_AARCH64_TLSDESC_LD64_LO12:
  case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    return true;
  }
}
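
// Illustrative note (editor's note, not part of the upstream file): the
// *_LO12 relocations reported by usesOnlyLowPageBits() only ever consume the
// page offset of a value that was materialized in two halves, e.g.
//   adrp x0, sym            // R_AARCH64_ADR_PREL_PG_HI21: bits [12, 32]
//   add  x0, x0, :lo12:sym  // R_AARCH64_ADD_ABS_LO12_NC:  bits [0, 11]
// so the low half needs no range check of its own.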

RelType AArch64::getDynRel(RelType type) const {
  if (type == R_AARCH64_ABS64 || type == R_AARCH64_AUTH_ABS64)
    return type;
  return R_AARCH64_NONE;
}

int64_t AArch64::getImplicitAddend(const uint8_t *buf, RelType type) const {
  switch (type) {
  case R_AARCH64_TLSDESC:
    return read64(buf + 8);
  case R_AARCH64_NONE:
  case R_AARCH64_GLOB_DAT:
  case R_AARCH64_JUMP_SLOT:
    return 0;
  case R_AARCH64_ABS16:
  case R_AARCH64_PREL16:
    return SignExtend64<16>(read16(buf));
  case R_AARCH64_ABS32:
  case R_AARCH64_PREL32:
    return SignExtend64<32>(read32(buf));
  case R_AARCH64_ABS64:
  case R_AARCH64_PREL64:
  case R_AARCH64_RELATIVE:
  case R_AARCH64_IRELATIVE:
  case R_AARCH64_TLS_TPREL64:
    return read64(buf);

  // The following relocation types all point at instructions, and
  // relocate an immediate field in the instruction.
  //
  // The general rule, from AAELF64 §5.7.2 "Addends and PC-bias",
  // says: "If the relocation relocates an instruction the immediate
  // field of the instruction is extracted, scaled as required by
  // the instruction field encoding, and sign-extended to 64 bits".

  // The R_AARCH64_MOVW family operates on wide MOV/MOVK/MOVZ
  // instructions, which have a 16-bit immediate field with its low
  // bit in bit 5 of the instruction encoding. When the immediate
  // field is used as an implicit addend for REL-type relocations,
  // it is treated as added to the low bits of the output value, not
  // shifted depending on the relocation type.
  //
  // This allows REL relocations to express the requirement 'please
  // add 12345 to this symbol value and give me the four 16-bit
  // chunks of the result', by putting the same addend 12345 in all
  // four instructions. Carries between the 16-bit chunks are
  // handled correctly, because the whole 64-bit addition is done
  // once per relocation.
  case R_AARCH64_MOVW_UABS_G0:
  case R_AARCH64_MOVW_UABS_G0_NC:
  case R_AARCH64_MOVW_UABS_G1:
  case R_AARCH64_MOVW_UABS_G1_NC:
  case R_AARCH64_MOVW_UABS_G2:
  case R_AARCH64_MOVW_UABS_G2_NC:
  case R_AARCH64_MOVW_UABS_G3:
    return SignExtend64<16>(getBits(read32(buf), 5, 20));

  // R_AARCH64_TSTBR14 points at a TBZ or TBNZ instruction, which
  // has a 14-bit offset measured in instructions, i.e. shifted left
  // by 2.
  case R_AARCH64_TSTBR14:
    return SignExtend64<16>(getBits(read32(buf), 5, 18) << 2);

  // R_AARCH64_CONDBR19 operates on the ordinary B.cond instruction,
  // which has a 19-bit offset measured in instructions.
  //
  // R_AARCH64_LD_PREL_LO19 operates on the LDR (literal)
  // instruction, which also has a 19-bit offset, measured in 4-byte
  // chunks. So the calculation is the same as for
  // R_AARCH64_CONDBR19.
  case R_AARCH64_CONDBR19:
  case R_AARCH64_LD_PREL_LO19:
    return SignExtend64<21>(getBits(read32(buf), 5, 23) << 2);

  // R_AARCH64_ADD_ABS_LO12_NC operates on ADD (immediate). The
  // immediate can optionally be shifted left by 12 bits, but this
  // relocation is intended for the case where it is not.
  case R_AARCH64_ADD_ABS_LO12_NC:
    return SignExtend64<12>(getBits(read32(buf), 10, 21));

  // R_AARCH64_ADR_PREL_LO21 operates on an ADR instruction, whose
  // 21-bit immediate is split between two bits high up in the word
  // (in fact the two _lowest_ order bits of the value) and 19 bits
  // lower down.
  //
  // R_AARCH64_ADR_PREL_PG_HI21[_NC] operate on an ADRP instruction,
  // which encodes the immediate in the same way, but will shift it
  // left by 12 bits when the instruction executes. For the same
  // reason as the MOVW family, we don't apply that left shift here.
  case R_AARCH64_ADR_PREL_LO21:
  case R_AARCH64_ADR_PREL_PG_HI21:
  case R_AARCH64_ADR_PREL_PG_HI21_NC:
    return SignExtend64<21>((getBits(read32(buf), 5, 23) << 2) |
                            getBits(read32(buf), 29, 30));

  // R_AARCH64_{JUMP,CALL}26 operate on B and BL, which have a
  // 26-bit offset measured in instructions.
  case R_AARCH64_JUMP26:
  case R_AARCH64_CALL26:
    return SignExtend64<28>(getBits(read32(buf), 0, 25) << 2);

  default:
    internalLinkerError(getErrorLocation(buf),
                        "cannot read addend for relocation " + toString(type));
    return 0;
  }
}
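
// Illustrative example (editor's note, not part of the upstream file): a BL
// encoded as 0x94000004 carries imm26 == 4, so getImplicitAddend() above
// returns SignExtend64<28>(4 << 2) == 16, i.e. "four instructions ahead".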

void AArch64::writeGotPlt(uint8_t *buf, const Symbol &) const {
  write64(buf, in.plt->getVA());
}

void AArch64::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
  if (config->writeAddends)
    write64(buf, s.getVA());
}

void AArch64::writePltHeader(uint8_t *buf) const {
  const uint8_t pltData[] = {
      0xf0, 0x7b, 0xbf, 0xa9, // stp  x16, x30, [sp,#-16]!
      0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.got.plt[2]))
      0x11, 0x02, 0x40, 0xf9, // ldr  x17, [x16, Offset(&(.got.plt[2]))]
      0x10, 0x02, 0x00, 0x91, // add  x16, x16, Offset(&(.got.plt[2]))
      0x20, 0x02, 0x1f, 0xd6, // br   x17
      0x1f, 0x20, 0x03, 0xd5, // nop
      0x1f, 0x20, 0x03, 0xd5, // nop
      0x1f, 0x20, 0x03, 0xd5  // nop
  };
  memcpy(buf, pltData, sizeof(pltData));

  uint64_t got = in.gotPlt->getVA();
  uint64_t plt = in.plt->getVA();
  relocateNoSym(buf + 4, R_AARCH64_ADR_PREL_PG_HI21,
                getAArch64Page(got + 16) - getAArch64Page(plt + 4));
  relocateNoSym(buf + 8, R_AARCH64_LDST64_ABS_LO12_NC, got + 16);
  relocateNoSym(buf + 12, R_AARCH64_ADD_ABS_LO12_NC, got + 16);
}

void AArch64::writePlt(uint8_t *buf, const Symbol &sym,
                       uint64_t pltEntryAddr) const {
  const uint8_t inst[] = {
      0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.got.plt[n]))
      0x11, 0x02, 0x40, 0xf9, // ldr  x17, [x16, Offset(&(.got.plt[n]))]
      0x10, 0x02, 0x00, 0x91, // add  x16, x16, Offset(&(.got.plt[n]))
      0x20, 0x02, 0x1f, 0xd6  // br   x17
  };
  memcpy(buf, inst, sizeof(inst));

  uint64_t gotPltEntryAddr = sym.getGotPltVA();
  relocateNoSym(buf, R_AARCH64_ADR_PREL_PG_HI21,
                getAArch64Page(gotPltEntryAddr) - getAArch64Page(pltEntryAddr));
  relocateNoSym(buf + 4, R_AARCH64_LDST64_ABS_LO12_NC, gotPltEntryAddr);
  relocateNoSym(buf + 8, R_AARCH64_ADD_ABS_LO12_NC, gotPltEntryAddr);
}
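
// Illustrative example (editor's note, not part of the upstream file): if a
// PLT entry sits at 0x230020 and its .got.plt slot at 0x240018, the adrp
// above receives Page(0x240018) - Page(0x230020) == 0x10000, while the
// ldr/add immediates both hold the slot's page offset, 0x18.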
A376// undefined non-weak symbol will have been errored.377if (s.isUndefined() && !s.isInPlt())378return false;379// ELF for the ARM 64-bit architecture, section Call and Jump relocations380// only permits range extension thunks for R_AARCH64_CALL26 and381// R_AARCH64_JUMP26 relocation types.382if (type != R_AARCH64_CALL26 && type != R_AARCH64_JUMP26 &&383type != R_AARCH64_PLT32)384return false;385uint64_t dst = expr == R_PLT_PC ? s.getPltVA() : s.getVA(a);386return !inBranchRange(type, branchAddr, dst);387}388389uint32_t AArch64::getThunkSectionSpacing() const {390// See comment in Arch/ARM.cpp for a more detailed explanation of391// getThunkSectionSpacing(). For AArch64 the only branches we are permitted to392// Thunk have a range of +/- 128 MiB393return (128 * 1024 * 1024) - 0x30000;394}395396bool AArch64::inBranchRange(RelType type, uint64_t src, uint64_t dst) const {397if (type != R_AARCH64_CALL26 && type != R_AARCH64_JUMP26 &&398type != R_AARCH64_PLT32)399return true;400// The AArch64 call and unconditional branch instructions have a range of401// +/- 128 MiB. The PLT32 relocation supports a range up to +/- 2 GiB.402uint64_t range =403type == R_AARCH64_PLT32 ? (UINT64_C(1) << 31) : (128 * 1024 * 1024);404if (dst > src) {405// Immediate of branch is signed.406range -= 4;407return dst - src <= range;408}409return src - dst <= range;410}411412static void write32AArch64Addr(uint8_t *l, uint64_t imm) {413uint32_t immLo = (imm & 0x3) << 29;414uint32_t immHi = (imm & 0x1FFFFC) << 3;415uint64_t mask = (0x3 << 29) | (0x1FFFFC << 3);416write32le(l, (read32le(l) & ~mask) | immLo | immHi);417}418419static void writeMaskedBits32le(uint8_t *p, int32_t v, uint32_t mask) {420write32le(p, (read32le(p) & ~mask) | v);421}422423// Update the immediate field in a AARCH64 ldr, str, and add instruction.424static void write32Imm12(uint8_t *l, uint64_t imm) {425writeMaskedBits32le(l, (imm & 0xFFF) << 10, 0xFFF << 10);426}427428// Update the immediate field in an AArch64 movk, movn or movz instruction429// for a signed relocation, and update the opcode of a movn or movz instruction430// to match the sign of the operand.431static void writeSMovWImm(uint8_t *loc, uint32_t imm) {432uint32_t inst = read32le(loc);433// Opcode field is bits 30, 29, with 10 = movz, 00 = movn and 11 = movk.434if (!(inst & (1 << 29))) {435// movn or movz.436if (imm & 0x10000) {437// Change opcode to movn, which takes an inverted operand.438imm ^= 0xFFFF;439inst &= ~(1 << 30);440} else {441// Change opcode to movz.442inst |= 1 << 30;443}444}445write32le(loc, inst | ((imm & 0xFFFF) << 5));446}447448void AArch64::relocate(uint8_t *loc, const Relocation &rel,449uint64_t val) const {450switch (rel.type) {451case R_AARCH64_ABS16:452case R_AARCH64_PREL16:453checkIntUInt(loc, val, 16, rel);454write16(loc, val);455break;456case R_AARCH64_ABS32:457case R_AARCH64_PREL32:458checkIntUInt(loc, val, 32, rel);459write32(loc, val);460break;461case R_AARCH64_PLT32:462case R_AARCH64_GOTPCREL32:463checkInt(loc, val, 32, rel);464write32(loc, val);465break;466case R_AARCH64_ABS64:467// AArch64 relocations to tagged symbols have extended semantics, as468// described here:469// https://github.com/ARM-software/abi-aa/blob/main/memtagabielf64/memtagabielf64.rst#841extended-semantics-of-r_aarch64_relative.470// tl;dr: encode the symbol's special addend in the place, which is an471// offset to the point where the logical tag is derived from. 

static void write32AArch64Addr(uint8_t *l, uint64_t imm) {
  uint32_t immLo = (imm & 0x3) << 29;
  uint32_t immHi = (imm & 0x1FFFFC) << 3;
  uint64_t mask = (0x3 << 29) | (0x1FFFFC << 3);
  write32le(l, (read32le(l) & ~mask) | immLo | immHi);
}

static void writeMaskedBits32le(uint8_t *p, int32_t v, uint32_t mask) {
  write32le(p, (read32le(p) & ~mask) | v);
}

// Update the immediate field in an AArch64 ldr, str, and add instruction.
static void write32Imm12(uint8_t *l, uint64_t imm) {
  writeMaskedBits32le(l, (imm & 0xFFF) << 10, 0xFFF << 10);
}

// Update the immediate field in an AArch64 movk, movn or movz instruction
// for a signed relocation, and update the opcode of a movn or movz instruction
// to match the sign of the operand.
static void writeSMovWImm(uint8_t *loc, uint32_t imm) {
  uint32_t inst = read32le(loc);
  // Opcode field is bits 30, 29, with 10 = movz, 00 = movn and 11 = movk.
  if (!(inst & (1 << 29))) {
    // movn or movz.
    if (imm & 0x10000) {
      // Change opcode to movn, which takes an inverted operand.
      imm ^= 0xFFFF;
      inst &= ~(1 << 30);
    } else {
      // Change opcode to movz.
      inst |= 1 << 30;
    }
  }
  write32le(loc, inst | ((imm & 0xFFFF) << 5));
}
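
// Illustrative example (editor's note, not part of the upstream file):
// writing the value -2 through a signed G0 relocation presents
// imm == 0xfffffffe. Bit 16 is set, so the MOVN form is chosen and
// imm ^ 0xFFFF leaves 0x0001 in the low 16 bits; at run time
// MOVN x<n>, #1 produces ~1 == 0xfffffffffffffffe == -2, as required.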

void AArch64::relocate(uint8_t *loc, const Relocation &rel,
                       uint64_t val) const {
  switch (rel.type) {
  case R_AARCH64_ABS16:
  case R_AARCH64_PREL16:
    checkIntUInt(loc, val, 16, rel);
    write16(loc, val);
    break;
  case R_AARCH64_ABS32:
  case R_AARCH64_PREL32:
    checkIntUInt(loc, val, 32, rel);
    write32(loc, val);
    break;
  case R_AARCH64_PLT32:
  case R_AARCH64_GOTPCREL32:
    checkInt(loc, val, 32, rel);
    write32(loc, val);
    break;
  case R_AARCH64_ABS64:
    // AArch64 relocations to tagged symbols have extended semantics, as
    // described here:
    // https://github.com/ARM-software/abi-aa/blob/main/memtagabielf64/memtagabielf64.rst#841extended-semantics-of-r_aarch64_relative.
    // tl;dr: encode the symbol's special addend in the place, which is an
    // offset to the point where the logical tag is derived from. Quick hack,
    // if the addend is within the symbol's bounds, no need to encode the tag
    // derivation offset.
    if (rel.sym && rel.sym->isTagged() &&
        (rel.addend < 0 ||
         rel.addend >= static_cast<int64_t>(rel.sym->getSize())))
      write64(loc, -rel.addend);
    else
      write64(loc, val);
    break;
  case R_AARCH64_PREL64:
    write64(loc, val);
    break;
  case R_AARCH64_AUTH_ABS64:
    // If val is wider than 32 bits, the relocation must have been moved from
    // .relr.auth.dyn to .rela.dyn, and the addend write is not needed.
    //
    // If val fits in 32 bits, we have two potential scenarios:
    // * True RELR: Write the 32-bit `val`.
    // * RELA: Even if the value now fits in 32 bits, it might have been
    //   converted from RELR during an iteration in
    //   finalizeAddressDependentContent(). Writing the value is harmless
    //   because dynamic linking ignores it.
    if (isInt<32>(val))
      write32(loc, val);
    break;
  case R_AARCH64_ADD_ABS_LO12_NC:
    write32Imm12(loc, val);
    break;
  case R_AARCH64_ADR_GOT_PAGE:
  case R_AARCH64_ADR_PREL_PG_HI21:
  case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
  case R_AARCH64_TLSDESC_ADR_PAGE21:
    checkInt(loc, val, 33, rel);
    [[fallthrough]];
  case R_AARCH64_ADR_PREL_PG_HI21_NC:
    write32AArch64Addr(loc, val >> 12);
    break;
  case R_AARCH64_ADR_PREL_LO21:
    checkInt(loc, val, 21, rel);
    write32AArch64Addr(loc, val);
    break;
  case R_AARCH64_JUMP26:
    // Normally we would just write the bits of the immediate field, however
    // when patching instructions for the cpu errata fix -fix-cortex-a53-843419
    // we want to replace a non-branch instruction with a branch immediate
    // instruction. By writing all the bits of the instruction including the
    // opcode and the immediate (0 001 | 01 imm26) we can do this
    // transformation by placing a R_AARCH64_JUMP26 relocation at the offset of
    // the instruction we want to patch.
    write32le(loc, 0x14000000);
    [[fallthrough]];
  case R_AARCH64_CALL26:
    checkInt(loc, val, 28, rel);
    writeMaskedBits32le(loc, (val & 0x0FFFFFFC) >> 2, 0x0FFFFFFC >> 2);
    break;
  case R_AARCH64_CONDBR19:
  case R_AARCH64_LD_PREL_LO19:
  case R_AARCH64_GOT_LD_PREL19:
    checkAlignment(loc, val, 4, rel);
    checkInt(loc, val, 21, rel);
    writeMaskedBits32le(loc, (val & 0x1FFFFC) << 3, 0x1FFFFC << 3);
    break;
  case R_AARCH64_LDST8_ABS_LO12_NC:
  case R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    write32Imm12(loc, getBits(val, 0, 11));
    break;
  case R_AARCH64_LDST16_ABS_LO12_NC:
  case R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    checkAlignment(loc, val, 2, rel);
    write32Imm12(loc, getBits(val, 1, 11));
    break;
  case R_AARCH64_LDST32_ABS_LO12_NC:
  case R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    checkAlignment(loc, val, 4, rel);
    write32Imm12(loc, getBits(val, 2, 11));
    break;
  case R_AARCH64_LDST64_ABS_LO12_NC:
  case R_AARCH64_LD64_GOT_LO12_NC:
  case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
  case R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
  case R_AARCH64_TLSDESC_LD64_LO12:
    checkAlignment(loc, val, 8, rel);
    write32Imm12(loc, getBits(val, 3, 11));
    break;
  case R_AARCH64_LDST128_ABS_LO12_NC:
  case R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC:
    checkAlignment(loc, val, 16, rel);
    write32Imm12(loc, getBits(val, 4, 11));
    break;
  case R_AARCH64_LD64_GOTPAGE_LO15:
    checkAlignment(loc, val, 8, rel);
    write32Imm12(loc, getBits(val, 3, 14));
    break;
  case R_AARCH64_MOVW_UABS_G0:
    checkUInt(loc, val, 16, rel);
    [[fallthrough]];
  case R_AARCH64_MOVW_UABS_G0_NC:
    writeMaskedBits32le(loc, (val & 0xFFFF) << 5, 0xFFFF << 5);
    break;
  case R_AARCH64_MOVW_UABS_G1:
    checkUInt(loc, val, 32, rel);
    [[fallthrough]];
  case R_AARCH64_MOVW_UABS_G1_NC:
    writeMaskedBits32le(loc, (val & 0xFFFF0000) >> 11, 0xFFFF0000 >> 11);
    break;
  case R_AARCH64_MOVW_UABS_G2:
    checkUInt(loc, val, 48, rel);
    [[fallthrough]];
  case R_AARCH64_MOVW_UABS_G2_NC:
    writeMaskedBits32le(loc, (val & 0xFFFF00000000) >> 27,
                        0xFFFF00000000 >> 27);
    break;
  case R_AARCH64_MOVW_UABS_G3:
    writeMaskedBits32le(loc, (val & 0xFFFF000000000000) >> 43,
                        0xFFFF000000000000 >> 43);
    break;
  case R_AARCH64_MOVW_PREL_G0:
  case R_AARCH64_MOVW_SABS_G0:
  case R_AARCH64_TLSLE_MOVW_TPREL_G0:
    checkInt(loc, val, 17, rel);
    [[fallthrough]];
  case R_AARCH64_MOVW_PREL_G0_NC:
  case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    writeSMovWImm(loc, val);
    break;
  case R_AARCH64_MOVW_PREL_G1:
  case R_AARCH64_MOVW_SABS_G1:
  case R_AARCH64_TLSLE_MOVW_TPREL_G1:
    checkInt(loc, val, 33, rel);
    [[fallthrough]];
  case R_AARCH64_MOVW_PREL_G1_NC:
  case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    writeSMovWImm(loc, val >> 16);
    break;
  case R_AARCH64_MOVW_PREL_G2:
  case R_AARCH64_MOVW_SABS_G2:
  case R_AARCH64_TLSLE_MOVW_TPREL_G2:
    checkInt(loc, val, 49, rel);
    [[fallthrough]];
  case R_AARCH64_MOVW_PREL_G2_NC:
    writeSMovWImm(loc, val >> 32);
    break;
  case R_AARCH64_MOVW_PREL_G3:
    writeSMovWImm(loc, val >> 48);
    break;
  case R_AARCH64_TSTBR14:
    checkInt(loc, val, 16, rel);
    writeMaskedBits32le(loc, (val & 0xFFFC) << 3, 0xFFFC << 3);
    break;
  case R_AARCH64_TLSLE_ADD_TPREL_HI12:
    checkUInt(loc, val, 24, rel);
    write32Imm12(loc, val >> 12);
    break;
  case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
  case R_AARCH64_TLSDESC_ADD_LO12:
    write32Imm12(loc, val);
    break;
  case R_AARCH64_TLSDESC:
    // For R_AARCH64_TLSDESC the addend is stored in the second 64-bit word.
    write64(loc + 8, val);
    break;
  default:
    llvm_unreachable("unknown relocation");
  }
}

void AArch64::relaxTlsGdToLe(uint8_t *loc, const Relocation &rel,
                             uint64_t val) const {
  // TLSDESC Global-Dynamic relocations are in the form:
  //   adrp x0, :tlsdesc:v             [R_AARCH64_TLSDESC_ADR_PAGE21]
  //   ldr  x1, [x0, #:tlsdesc_lo12:v] [R_AARCH64_TLSDESC_LD64_LO12]
  //   add  x0, x0, :tlsdesc_lo12:v    [R_AARCH64_TLSDESC_ADD_LO12]
  //   .tlsdesccall                    [R_AARCH64_TLSDESC_CALL]
  //   blr  x1
  // And it can be optimized to:
  //   movz x0, #0x0, lsl #16
  //   movk x0, #0x10
  //   nop
  //   nop
  checkUInt(loc, val, 32, rel);

  switch (rel.type) {
  case R_AARCH64_TLSDESC_ADD_LO12:
  case R_AARCH64_TLSDESC_CALL:
    write32le(loc, 0xd503201f); // nop
    return;
  case R_AARCH64_TLSDESC_ADR_PAGE21:
    write32le(loc, 0xd2a00000 | (((val >> 16) & 0xffff) << 5)); // movz
    return;
  case R_AARCH64_TLSDESC_LD64_LO12:
    write32le(loc, 0xf2800000 | ((val & 0xffff) << 5)); // movk
    return;
  default:
    llvm_unreachable("unsupported relocation for TLS GD to LE relaxation");
  }
}
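
// Illustrative example (editor's note, not part of the upstream file): for a
// TP offset of 0x12345, relaxTlsGdToLe() rewrites the four slots in place to
//   movz x0, #0x1, lsl #16
//   movk x0, #0x2345
//   nop
//   nop
// so the offset is computed directly and the TLSDESC call disappears.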

void AArch64::relaxTlsGdToIe(uint8_t *loc, const Relocation &rel,
                             uint64_t val) const {
  // TLSDESC Global-Dynamic relocations are in the form:
  //   adrp x0, :tlsdesc:v             [R_AARCH64_TLSDESC_ADR_PAGE21]
  //   ldr  x1, [x0, #:tlsdesc_lo12:v] [R_AARCH64_TLSDESC_LD64_LO12]
  //   add  x0, x0, :tlsdesc_lo12:v    [R_AARCH64_TLSDESC_ADD_LO12]
  //   .tlsdesccall                    [R_AARCH64_TLSDESC_CALL]
  //   blr  x1
  // And it can be optimized to:
  //   adrp x0, :gottprel:v
  //   ldr  x0, [x0, :gottprel_lo12:v]
  //   nop
  //   nop

  switch (rel.type) {
  case R_AARCH64_TLSDESC_ADD_LO12:
  case R_AARCH64_TLSDESC_CALL:
    write32le(loc, 0xd503201f); // nop
    break;
  case R_AARCH64_TLSDESC_ADR_PAGE21:
    write32le(loc, 0x90000000); // adrp
    relocateNoSym(loc, R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, val);
    break;
  case R_AARCH64_TLSDESC_LD64_LO12:
    write32le(loc, 0xf9400000); // ldr
    relocateNoSym(loc, R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, val);
    break;
  default:
    llvm_unreachable("unsupported relocation for TLS GD to IE relaxation");
  }
}

void AArch64::relaxTlsIeToLe(uint8_t *loc, const Relocation &rel,
                             uint64_t val) const {
  checkUInt(loc, val, 32, rel);

  if (rel.type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21) {
    // Generate MOVZ.
    uint32_t regNo = read32le(loc) & 0x1f;
    write32le(loc, (0xd2a00000 | regNo) | (((val >> 16) & 0xffff) << 5));
    return;
  }
  if (rel.type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC) {
    // Generate MOVK.
    uint32_t regNo = read32le(loc) & 0x1f;
    write32le(loc, (0xf2800000 | regNo) | ((val & 0xffff) << 5));
    return;
  }
  llvm_unreachable("invalid relocation for TLS IE to LE relaxation");
}

AArch64Relaxer::AArch64Relaxer(ArrayRef<Relocation> relocs) {
  if (!config->relax)
    return;
  // Check if R_AARCH64_ADR_GOT_PAGE and R_AARCH64_LD64_GOT_LO12_NC
  // always appear in pairs.
  size_t i = 0;
  const size_t size = relocs.size();
  for (; i != size; ++i) {
    if (relocs[i].type == R_AARCH64_ADR_GOT_PAGE) {
      if (i + 1 < size && relocs[i + 1].type == R_AARCH64_LD64_GOT_LO12_NC) {
        ++i;
        continue;
      }
      break;
    } else if (relocs[i].type == R_AARCH64_LD64_GOT_LO12_NC) {
      break;
    }
  }
  safeToRelaxAdrpLdr = i == size;
}

bool AArch64Relaxer::tryRelaxAdrpAdd(const Relocation &adrpRel,
                                     const Relocation &addRel, uint64_t secAddr,
                                     uint8_t *buf) const {
  // When the address of sym is within the range of ADR then
  // we may relax
  //   ADRP xn, sym
  //   ADD  xn, xn, :lo12: sym
  // to
  //   NOP
  //   ADR  xn, sym
  if (!config->relax || adrpRel.type != R_AARCH64_ADR_PREL_PG_HI21 ||
      addRel.type != R_AARCH64_ADD_ABS_LO12_NC)
    return false;
  // Check if the relocations apply to consecutive instructions.
  if (adrpRel.offset + 4 != addRel.offset)
    return false;
  if (adrpRel.sym != addRel.sym)
    return false;
  if (adrpRel.addend != 0 || addRel.addend != 0)
    return false;

  uint32_t adrpInstr = read32le(buf + adrpRel.offset);
  uint32_t addInstr = read32le(buf + addRel.offset);
  // Check if the first instruction is ADRP and the second instruction is ADD.
  if ((adrpInstr & 0x9f000000) != 0x90000000 ||
      (addInstr & 0xffc00000) != 0x91000000)
    return false;
  uint32_t adrpDestReg = adrpInstr & 0x1f;
  uint32_t addDestReg = addInstr & 0x1f;
  uint32_t addSrcReg = (addInstr >> 5) & 0x1f;
  if (adrpDestReg != addDestReg || adrpDestReg != addSrcReg)
    return false;

  Symbol &sym = *adrpRel.sym;
  // Check if the address difference is within 1MiB range.
  int64_t val = sym.getVA() - (secAddr + addRel.offset);
  if (val < -1024 * 1024 || val >= 1024 * 1024)
    return false;

  Relocation adrRel = {R_ABS, R_AARCH64_ADR_PREL_LO21, addRel.offset,
                       /*addend=*/0, &sym};
  // nop
  write32le(buf + adrpRel.offset, 0xd503201f);
  // adr x_<dest_reg>
  write32le(buf + adrRel.offset, 0x10000000 | adrpDestReg);
  target->relocate(buf + adrRel.offset, adrRel, val);
  return true;
}
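
// Illustrative example (editor's note, not part of the upstream file):
// tryRelaxAdrpAdd() fires only when the symbol lands within ADR's +/- 1 MiB
// reach, e.g. a symbol at 0x2100fc addressed by an ADD at 0x210008
// (val == 0xf4) becomes
//   nop
//   adr x0, 0xf4
// while a symbol 2 MiB away keeps the original ADRP/ADD pair.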

bool AArch64Relaxer::tryRelaxAdrpLdr(const Relocation &adrpRel,
                                     const Relocation &ldrRel, uint64_t secAddr,
                                     uint8_t *buf) const {
  if (!safeToRelaxAdrpLdr)
    return false;

  // When the definition of sym is not preemptible then we may
  // be able to relax
  //   ADRP xn, :got: sym
  //   LDR  xn, [ xn :got_lo12: sym]
  // to
  //   ADRP xn, sym
  //   ADD  xn, xn, :lo12: sym

  if (adrpRel.type != R_AARCH64_ADR_GOT_PAGE ||
      ldrRel.type != R_AARCH64_LD64_GOT_LO12_NC)
    return false;
  // Check if the relocations apply to consecutive instructions.
  if (adrpRel.offset + 4 != ldrRel.offset)
    return false;
  // Check if the relocations reference the same symbol and
  // skip undefined, preemptible and STT_GNU_IFUNC symbols.
  if (!adrpRel.sym || adrpRel.sym != ldrRel.sym || !adrpRel.sym->isDefined() ||
      adrpRel.sym->isPreemptible || adrpRel.sym->isGnuIFunc())
    return false;
  // Check if the addends of both relocations are zero.
  if (adrpRel.addend != 0 || ldrRel.addend != 0)
    return false;
  uint32_t adrpInstr = read32le(buf + adrpRel.offset);
  uint32_t ldrInstr = read32le(buf + ldrRel.offset);
  // Check if the first instruction is ADRP and the second instruction is LDR.
  if ((adrpInstr & 0x9f000000) != 0x90000000 ||
      (ldrInstr & 0x3b000000) != 0x39000000)
    return false;
  // Check the value of the sf bit.
  if (!(ldrInstr >> 31))
    return false;
  uint32_t adrpDestReg = adrpInstr & 0x1f;
  uint32_t ldrDestReg = ldrInstr & 0x1f;
  uint32_t ldrSrcReg = (ldrInstr >> 5) & 0x1f;
  // Check if ADRP and LDR use the same register.
  if (adrpDestReg != ldrDestReg || adrpDestReg != ldrSrcReg)
    return false;

  Symbol &sym = *adrpRel.sym;
  // GOT references to absolute symbols can't be relaxed to use ADRP/ADD in
  // position-independent code because these instructions produce a relative
  // address.
  if (config->isPic && !cast<Defined>(sym).section)
    return false;
  // Check if the address difference is within 4GB range.
  int64_t val =
      getAArch64Page(sym.getVA()) - getAArch64Page(secAddr + adrpRel.offset);
  if (val != llvm::SignExtend64(val, 33))
    return false;

  Relocation adrpSymRel = {R_AARCH64_PAGE_PC, R_AARCH64_ADR_PREL_PG_HI21,
                           adrpRel.offset, /*addend=*/0, &sym};
  Relocation addRel = {R_ABS, R_AARCH64_ADD_ABS_LO12_NC, ldrRel.offset,
                       /*addend=*/0, &sym};

  // adrp x_<dest_reg>
  write32le(buf + adrpSymRel.offset, 0x90000000 | adrpDestReg);
  // add x_<dest reg>, x_<dest reg>
  write32le(buf + addRel.offset, 0x91000000 | adrpDestReg | (adrpDestReg << 5));

  target->relocate(buf + adrpSymRel.offset, adrpSymRel,
                   SignExtend64(getAArch64Page(sym.getVA()) -
                                    getAArch64Page(secAddr + adrpSymRel.offset),
                                64));
  target->relocate(buf + addRel.offset, addRel, SignExtend64(sym.getVA(), 64));
  tryRelaxAdrpAdd(adrpSymRel, addRel, secAddr, buf);
  return true;
}
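
// Illustrative note (editor's note, not part of the upstream file): ADRP's
// 21-bit signed immediate counts 4 KiB pages, so the reachable page delta is
// +/- 2^32 bytes; that is exactly what the SignExtend64(val, 33) check above
// verifies before the GOT load is turned into an ADRP/ADD pair.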

// Tagged symbols have upper address bits that are added by the dynamic loader,
// and thus need the full 64-bit GOT entry. Do not relax such symbols.
static bool needsGotForMemtag(const Relocation &rel) {
  return rel.sym->isTagged() && needsGot(rel.expr);
}

void AArch64::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {
  uint64_t secAddr = sec.getOutputSection()->addr;
  if (auto *s = dyn_cast<InputSection>(&sec))
    secAddr += s->outSecOff;
  else if (auto *ehIn = dyn_cast<EhInputSection>(&sec))
    secAddr += ehIn->getParent()->outSecOff;
  AArch64Relaxer relaxer(sec.relocs());
  for (size_t i = 0, size = sec.relocs().size(); i != size; ++i) {
    const Relocation &rel = sec.relocs()[i];
    uint8_t *loc = buf + rel.offset;
    const uint64_t val =
        sec.getRelocTargetVA(sec.file, rel.type, rel.addend,
                             secAddr + rel.offset, *rel.sym, rel.expr);

    if (needsGotForMemtag(rel)) {
      relocate(loc, rel, val);
      continue;
    }

    switch (rel.expr) {
    case R_AARCH64_GOT_PAGE_PC:
      if (i + 1 < size &&
          relaxer.tryRelaxAdrpLdr(rel, sec.relocs()[i + 1], secAddr, buf)) {
        ++i;
        continue;
      }
      break;
    case R_AARCH64_PAGE_PC:
      if (i + 1 < size &&
          relaxer.tryRelaxAdrpAdd(rel, sec.relocs()[i + 1], secAddr, buf)) {
        ++i;
        continue;
      }
      break;
    case R_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC:
    case R_RELAX_TLS_GD_TO_IE_ABS:
      relaxTlsGdToIe(loc, rel, val);
      continue;
    case R_RELAX_TLS_GD_TO_LE:
      relaxTlsGdToLe(loc, rel, val);
      continue;
    case R_RELAX_TLS_IE_TO_LE:
      relaxTlsIeToLe(loc, rel, val);
      continue;
    default:
      break;
    }
    relocate(loc, rel, val);
  }
}
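
// Illustrative note (editor's note, not part of the upstream file):
// relocateAlloc() looks one relocation ahead, handing an ADRP/LDR (or
// ADRP/ADD) pair to the relaxer as relocs()[i] and relocs()[i + 1]; on
// success both are consumed via ++i and neither goes through the generic
// relocate() path.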

// AArch64 may use security features in variant PLT sequences. These are:
// Pointer Authentication (PAC), introduced in armv8.3-a and Branch Target
// Indicator (BTI) introduced in armv8.5-a. The additional instructions used
// in the variant PLT sequences are encoded in the Hint space so they can be
// deployed on older architectures, which treat the instructions as a nop.
// PAC and BTI can be combined leading to the following combinations:
// writePltHeader
// writePltHeaderBti (no PAC Header needed)
// writePlt
// writePltBti (BTI only)
// writePltPac (PAC only)
// writePltBtiPac (BTI and PAC)
//
// When PAC is enabled the dynamic loader encrypts the address that it places
// in the .got.plt using the pacia1716 instruction which encrypts the value in
// x17 using the modifier in x16. The static linker places autia1716 before the
// indirect branch to x17 to authenticate the address in x17 with the modifier
// in x16. This makes it more difficult for an attacker to modify the value in
// the .got.plt.
//
// When BTI is enabled all indirect branches must land on a bti instruction.
// The static linker must place a bti instruction at the start of any PLT entry
// that may be the target of an indirect branch. As the PLT entries call the
// lazy resolver indirectly this must have a bti instruction at start. In
// general a bti instruction is not needed for a PLT entry as indirect calls
// are resolved to the function address and not the PLT entry for the function.
// There are a small number of cases where the PLT address can escape, such as
// taking the address of a function or ifunc via a non got-generating
// relocation, and a shared library refers to that symbol.
//
// We use the bti c variant of the instruction which permits indirect branches
// (br) via x16/x17 and indirect function calls (blr) via any register. The ABI
// guarantees that all indirect branches from code requiring BTI protection
// will go via x16/x17.

namespace {
class AArch64BtiPac final : public AArch64 {
public:
  AArch64BtiPac();
  void writePltHeader(uint8_t *buf) const override;
  void writePlt(uint8_t *buf, const Symbol &sym,
                uint64_t pltEntryAddr) const override;

private:
  bool btiHeader; // bti instruction needed in PLT Header and Entry
  bool pacEntry;  // autia1716 instruction needed in PLT Entry
};
} // namespace

AArch64BtiPac::AArch64BtiPac() {
  btiHeader = (config->andFeatures & GNU_PROPERTY_AARCH64_FEATURE_1_BTI);
  // A BTI (Branch Target Indicator) PLT entry is only required if the
  // address of the PLT entry can be taken by the program, which permits an
  // indirect jump to the PLT entry. This can happen when the address
  // of the PLT entry for a function is canonicalised due to the address of
  // the function in an executable being taken by a shared library, or when a
  // non-preemptible ifunc is referenced by non-GOT-generating,
  // non-PLT-generating relocations.
  // The PAC PLT entries require dynamic loader support and this isn't known
  // from properties in the objects, so we use the command line flag.
  pacEntry = config->zPacPlt;

  if (btiHeader || pacEntry) {
    pltEntrySize = 24;
    ipltEntrySize = 24;
  }
}

void AArch64BtiPac::writePltHeader(uint8_t *buf) const {
  const uint8_t btiData[] = {0x5f, 0x24, 0x03, 0xd5}; // bti c
  const uint8_t pltData[] = {
      0xf0, 0x7b, 0xbf, 0xa9, // stp  x16, x30, [sp,#-16]!
      0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.got.plt[2]))
      0x11, 0x02, 0x40, 0xf9, // ldr  x17, [x16, Offset(&(.got.plt[2]))]
      0x10, 0x02, 0x00, 0x91, // add  x16, x16, Offset(&(.got.plt[2]))
      0x20, 0x02, 0x1f, 0xd6, // br   x17
      0x1f, 0x20, 0x03, 0xd5, // nop
      0x1f, 0x20, 0x03, 0xd5  // nop
  };
  const uint8_t nopData[] = {0x1f, 0x20, 0x03, 0xd5}; // nop

  uint64_t got = in.gotPlt->getVA();
  uint64_t plt = in.plt->getVA();

  if (btiHeader) {
    // PltHeader is called indirectly by plt[N]. Prefix pltData with a BTI C
    // instruction.
    memcpy(buf, btiData, sizeof(btiData));
    buf += sizeof(btiData);
    plt += sizeof(btiData);
  }
  memcpy(buf, pltData, sizeof(pltData));

  relocateNoSym(buf + 4, R_AARCH64_ADR_PREL_PG_HI21,
                getAArch64Page(got + 16) - getAArch64Page(plt + 8));
  relocateNoSym(buf + 8, R_AARCH64_LDST64_ABS_LO12_NC, got + 16);
  relocateNoSym(buf + 12, R_AARCH64_ADD_ABS_LO12_NC, got + 16);
  if (!btiHeader)
    // We didn't add the BTI c instruction so round out size with NOP.
    memcpy(buf + sizeof(pltData), nopData, sizeof(nopData));
}
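
// Illustrative note (editor's note, not part of the upstream file): every
// variant PLT entry still occupies pltEntrySize == 24 bytes once BTI or PAC
// is enabled:
//   bti c + adrp/ldr/add + br x17 + nop         (BTI only)
//   adrp/ldr/add + autia1716 + br x17 + nop     (PAC only)
//   bti c + adrp/ldr/add + autia1716 + br x17   (BTI and PAC)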

void AArch64BtiPac::writePlt(uint8_t *buf, const Symbol &sym,
                             uint64_t pltEntryAddr) const {
  // The PLT entry is of the form:
  // [btiData] addrInst (pacBr | stdBr) [nopData]
  const uint8_t btiData[] = {0x5f, 0x24, 0x03, 0xd5}; // bti c
  const uint8_t addrInst[] = {
      0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.got.plt[n]))
      0x11, 0x02, 0x40, 0xf9, // ldr  x17, [x16, Offset(&(.got.plt[n]))]
      0x10, 0x02, 0x00, 0x91  // add  x16, x16, Offset(&(.got.plt[n]))
  };
  const uint8_t pacBr[] = {
      0x9f, 0x21, 0x03, 0xd5, // autia1716
      0x20, 0x02, 0x1f, 0xd6  // br   x17
  };
  const uint8_t stdBr[] = {
      0x20, 0x02, 0x1f, 0xd6, // br   x17
      0x1f, 0x20, 0x03, 0xd5  // nop
  };
  const uint8_t nopData[] = {0x1f, 0x20, 0x03, 0xd5}; // nop

  // NEEDS_COPY indicates a non-ifunc canonical PLT entry whose address may
  // escape to shared objects. isInIplt indicates a non-preemptible ifunc. Its
  // address may escape if referenced by a direct relocation. If relative
  // vtables are used and the vtable is in a shared object, the offsets will
  // be to the PLT entry. The condition is conservative.
  bool hasBti = btiHeader &&
                (sym.hasFlag(NEEDS_COPY) || sym.isInIplt || sym.thunkAccessed);
  if (hasBti) {
    memcpy(buf, btiData, sizeof(btiData));
    buf += sizeof(btiData);
    pltEntryAddr += sizeof(btiData);
  }

  uint64_t gotPltEntryAddr = sym.getGotPltVA();
  memcpy(buf, addrInst, sizeof(addrInst));
  relocateNoSym(buf, R_AARCH64_ADR_PREL_PG_HI21,
                getAArch64Page(gotPltEntryAddr) - getAArch64Page(pltEntryAddr));
  relocateNoSym(buf + 4, R_AARCH64_LDST64_ABS_LO12_NC, gotPltEntryAddr);
  relocateNoSym(buf + 8, R_AARCH64_ADD_ABS_LO12_NC, gotPltEntryAddr);

  if (pacEntry)
    memcpy(buf + sizeof(addrInst), pacBr, sizeof(pacBr));
  else
    memcpy(buf + sizeof(addrInst), stdBr, sizeof(stdBr));
  if (!hasBti)
    // We didn't add the BTI c instruction so round out size with NOP.
    memcpy(buf + sizeof(addrInst) + sizeof(stdBr), nopData, sizeof(nopData));
}

static TargetInfo *getTargetInfo() {
  if ((config->andFeatures & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) ||
      config->zPacPlt) {
    static AArch64BtiPac t;
    return &t;
  }
  static AArch64 t;
  return &t;
}

TargetInfo *elf::getAArch64TargetInfo() { return getTargetInfo(); }

template <class ELFT>
static void
addTaggedSymbolReferences(InputSectionBase &sec,
                          DenseMap<Symbol *, unsigned> &referenceCount) {
  assert(sec.type == SHT_AARCH64_MEMTAG_GLOBALS_STATIC);

  const RelsOrRelas<ELFT> rels = sec.relsOrRelas<ELFT>();
  if (rels.areRelocsRel())
    error("non-RELA relocations are not allowed with memtag globals");

  for (const typename ELFT::Rela &rel : rels.relas) {
    Symbol &sym = sec.file->getRelocTargetSym(rel);
    // Linker-synthesized symbols such as __executable_start may be referenced
    // as tagged in input objfiles, and we don't want them to be tagged. A
    // cheap way to exclude them is the type check, but their type is
    // STT_NOTYPE. In addition, this saves us from checking untaggable symbols,
    // like functions or TLS symbols.
    if (sym.type != STT_OBJECT)
      continue;
    // STB_LOCAL symbols can't be referenced from outside the object file, and
    // thus don't need to be checked for references from other object files.
    if (sym.binding == STB_LOCAL) {
      sym.setIsTagged(true);
      continue;
    }
    ++referenceCount[&sym];
  }
  sec.markDead();
}

// A tagged symbol must be denoted as being tagged by all references and the
// chosen definition. For simplicity, here, it must also be denoted as tagged
// for all definitions. Otherwise:
//
//  1. A tagged definition can be used by an untagged declaration, in which
//     case the untagged access may be PC-relative, causing a tag mismatch at
//     runtime.
//  2. An untagged definition can be used by a tagged declaration, where the
//     compiler has taken advantage of the increased alignment of the tagged
//     declaration, but the alignment at runtime is wrong, causing a fault.
//
// Ideally, this isn't a problem, as any TU that imports or exports tagged
// symbols should also be built with tagging. But, to handle these cases, we
// demote the symbol to be untagged.
void lld::elf::createTaggedSymbols(const SmallVector<ELFFileBase *, 0> &files) {
  assert(hasMemtag());

  // First, collect all symbols that are marked as tagged, and count how many
  // times they're marked as tagged.
  DenseMap<Symbol *, unsigned> taggedSymbolReferenceCount;
  for (InputFile *file : files) {
    if (file->kind() != InputFile::ObjKind)
      continue;
    for (InputSectionBase *section : file->getSections()) {
      if (!section || section->type != SHT_AARCH64_MEMTAG_GLOBALS_STATIC ||
          section == &InputSection::discarded)
        continue;
      invokeELFT(addTaggedSymbolReferences, *section,
                 taggedSymbolReferenceCount);
    }
  }

  // Now, go through all the symbols. If the number of declarations +
  // definitions to a symbol exceeds the amount of times they're marked as
  // tagged, it means we have an objfile that uses the untagged variant of the
  // symbol.
  for (InputFile *file : files) {
    if (file->kind() != InputFile::BinaryKind &&
        file->kind() != InputFile::ObjKind)
      continue;

    for (Symbol *symbol : file->getSymbols()) {
      // See `addTaggedSymbolReferences` for more details.
      if (symbol->type != STT_OBJECT || symbol->binding == STB_LOCAL)
        continue;
      auto it = taggedSymbolReferenceCount.find(symbol);
      if (it == taggedSymbolReferenceCount.end())
        continue;
      unsigned &remainingAllowedTaggedRefs = it->second;
      if (remainingAllowedTaggedRefs == 0) {
        taggedSymbolReferenceCount.erase(it);
        continue;
      }
      --remainingAllowedTaggedRefs;
    }
  }

  // `addTaggedSymbolReferences` has already checked that we have RELA
  // relocations, the only other way to get written addends is with
  // --apply-dynamic-relocs.
  if (!taggedSymbolReferenceCount.empty() && config->writeAddends)
    error("--apply-dynamic-relocs cannot be used with MTE globals");

  // Now, `taggedSymbolReferenceCount` should only contain symbols that are
  // defined as tagged exactly the same number of times as they're referenced,
  // meaning all uses are tagged.
  for (auto &[symbol, remainingTaggedRefs] : taggedSymbolReferenceCount) {
    assert(remainingTaggedRefs == 0 &&
           "Symbol is defined as tagged more times than it's used");
    symbol->setIsTagged(true);
  }
}