GitHub Repository: freebsd/freebsd-src
Path: blob/main/contrib/llvm-project/lld/ELF/Arch/RISCV.cpp
1
//===- RISCV.cpp ----------------------------------------------------------===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
9
#include "InputFiles.h"
10
#include "OutputSections.h"
11
#include "Symbols.h"
12
#include "SyntheticSections.h"
13
#include "Target.h"
14
#include "llvm/Support/ELFAttributes.h"
15
#include "llvm/Support/LEB128.h"
16
#include "llvm/Support/RISCVAttributeParser.h"
17
#include "llvm/Support/RISCVAttributes.h"
18
#include "llvm/Support/TimeProfiler.h"
19
#include "llvm/TargetParser/RISCVISAInfo.h"
20
21
using namespace llvm;
22
using namespace llvm::object;
23
using namespace llvm::support::endian;
24
using namespace llvm::ELF;
25
using namespace lld;
26
using namespace lld::elf;
27
28
namespace {
29
30
class RISCV final : public TargetInfo {
31
public:
32
RISCV();
33
uint32_t calcEFlags() const override;
34
int64_t getImplicitAddend(const uint8_t *buf, RelType type) const override;
35
void writeGotHeader(uint8_t *buf) const override;
36
void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
37
void writeIgotPlt(uint8_t *buf, const Symbol &s) const override;
38
void writePltHeader(uint8_t *buf) const override;
39
void writePlt(uint8_t *buf, const Symbol &sym,
40
uint64_t pltEntryAddr) const override;
41
RelType getDynRel(RelType type) const override;
42
RelExpr getRelExpr(RelType type, const Symbol &s,
43
const uint8_t *loc) const override;
44
void relocate(uint8_t *loc, const Relocation &rel,
45
uint64_t val) const override;
46
void relocateAlloc(InputSectionBase &sec, uint8_t *buf) const override;
47
bool relaxOnce(int pass) const override;
48
void finalizeRelax(int passes) const override;
49
};
50
51
} // end anonymous namespace
52
53
// These are internal relocation numbers for GP relaxation. They aren't part
54
// of the psABI spec.
55
#define INTERNAL_R_RISCV_GPREL_I 256
56
#define INTERNAL_R_RISCV_GPREL_S 257
57
58
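// Per the RISC-V ELF psABI, DTP-relative TLS offsets carry a 0x800 bias;
// relocate() subtracts it when applying R_RISCV_TLS_DTPREL{32,64}.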
const uint64_t dtpOffset = 0x800;
59
60
namespace {
61
enum Op {
62
ADDI = 0x13,
63
AUIPC = 0x17,
64
JALR = 0x67,
65
LD = 0x3003,
66
LUI = 0x37,
67
LW = 0x2003,
68
SRLI = 0x5013,
69
SUB = 0x40000033,
70
};
71
72
enum Reg {
73
X_RA = 1,
74
X_GP = 3,
75
X_TP = 4,
76
X_T0 = 5,
77
X_T1 = 6,
78
X_T2 = 7,
79
X_A0 = 10,
80
X_T3 = 28,
81
};
82
} // namespace
83
84
static uint32_t hi20(uint32_t val) { return (val + 0x800) >> 12; }
85
static uint32_t lo12(uint32_t val) { return val & 4095; }
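// LUI/AUIPC materialize hi20(val) << 12 and the consuming instruction adds a
// sign-extended 12-bit immediate, hence the +0x800 rounding in hi20. E.g. for
// val = 0x12345fff, hi20 = 0x12346 and lo12 = 0xfff (-1 once sign-extended),
// and (0x12346 << 12) - 1 == 0x12345fff.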
86
87
static uint32_t itype(uint32_t op, uint32_t rd, uint32_t rs1, uint32_t imm) {
88
return op | (rd << 7) | (rs1 << 15) | (imm << 20);
89
}
90
static uint32_t rtype(uint32_t op, uint32_t rd, uint32_t rs1, uint32_t rs2) {
91
return op | (rd << 7) | (rs1 << 15) | (rs2 << 20);
92
}
93
static uint32_t utype(uint32_t op, uint32_t rd, uint32_t imm) {
94
return op | (rd << 7) | (imm << 12);
95
}
96
97
// Extract bits v[begin:end], where range is inclusive, and begin must be < 63.
98
static uint32_t extractBits(uint64_t v, uint32_t begin, uint32_t end) {
99
return (v & ((1ULL << (begin + 1)) - 1)) >> end;
100
}
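// E.g. extractBits(0b110100, /*begin=*/4, /*end=*/2) == 0b101: the mask keeps
// bits [begin:0] and the shift drops bits [end-1:0].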
101
102
static uint32_t setLO12_I(uint32_t insn, uint32_t imm) {
103
return (insn & 0xfffff) | (imm << 20);
104
}
105
static uint32_t setLO12_S(uint32_t insn, uint32_t imm) {
106
return (insn & 0x1fff07f) | (extractBits(imm, 11, 5) << 25) |
107
(extractBits(imm, 4, 0) << 7);
108
}
109
110
RISCV::RISCV() {
111
copyRel = R_RISCV_COPY;
112
pltRel = R_RISCV_JUMP_SLOT;
113
relativeRel = R_RISCV_RELATIVE;
114
iRelativeRel = R_RISCV_IRELATIVE;
115
if (config->is64) {
116
symbolicRel = R_RISCV_64;
117
tlsModuleIndexRel = R_RISCV_TLS_DTPMOD64;
118
tlsOffsetRel = R_RISCV_TLS_DTPREL64;
119
tlsGotRel = R_RISCV_TLS_TPREL64;
120
} else {
121
symbolicRel = R_RISCV_32;
122
tlsModuleIndexRel = R_RISCV_TLS_DTPMOD32;
123
tlsOffsetRel = R_RISCV_TLS_DTPREL32;
124
tlsGotRel = R_RISCV_TLS_TPREL32;
125
}
126
gotRel = symbolicRel;
127
tlsDescRel = R_RISCV_TLSDESC;
128
129
// .got[0] = _DYNAMIC
130
gotHeaderEntriesNum = 1;
131
132
// .got.plt[0] = _dl_runtime_resolve, .got.plt[1] = link_map
133
gotPltHeaderEntriesNum = 2;
134
135
pltHeaderSize = 32;
136
pltEntrySize = 16;
137
ipltEntrySize = 16;
138
}
139
140
static uint32_t getEFlags(InputFile *f) {
141
if (config->is64)
142
return cast<ObjFile<ELF64LE>>(f)->getObj().getHeader().e_flags;
143
return cast<ObjFile<ELF32LE>>(f)->getObj().getHeader().e_flags;
144
}
145
146
uint32_t RISCV::calcEFlags() const {
147
// If there are only binary input files (from -b binary), use a
148
// value of 0 for the ELF header flags.
149
if (ctx.objectFiles.empty())
150
return 0;
151
152
uint32_t target = getEFlags(ctx.objectFiles.front());
153
154
for (InputFile *f : ctx.objectFiles) {
155
uint32_t eflags = getEFlags(f);
156
if (eflags & EF_RISCV_RVC)
157
target |= EF_RISCV_RVC;
158
159
if ((eflags & EF_RISCV_FLOAT_ABI) != (target & EF_RISCV_FLOAT_ABI))
160
error(
161
toString(f) +
162
": cannot link object files with different floating-point ABI from " +
163
toString(ctx.objectFiles[0]));
164
165
if ((eflags & EF_RISCV_RVE) != (target & EF_RISCV_RVE))
166
error(toString(f) +
167
": cannot link object files with different EF_RISCV_RVE");
168
}
169
170
return target;
171
}
172
173
int64_t RISCV::getImplicitAddend(const uint8_t *buf, RelType type) const {
174
switch (type) {
175
default:
176
internalLinkerError(getErrorLocation(buf),
177
"cannot read addend for relocation " + toString(type));
178
return 0;
179
case R_RISCV_32:
180
case R_RISCV_TLS_DTPMOD32:
181
case R_RISCV_TLS_DTPREL32:
182
case R_RISCV_TLS_TPREL32:
183
return SignExtend64<32>(read32le(buf));
184
case R_RISCV_64:
185
case R_RISCV_TLS_DTPMOD64:
186
case R_RISCV_TLS_DTPREL64:
187
case R_RISCV_TLS_TPREL64:
188
return read64le(buf);
189
case R_RISCV_RELATIVE:
190
case R_RISCV_IRELATIVE:
191
return config->is64 ? read64le(buf) : read32le(buf);
192
case R_RISCV_NONE:
193
case R_RISCV_JUMP_SLOT:
194
// These relocations are defined as not having an implicit addend.
195
return 0;
196
case R_RISCV_TLSDESC:
197
return config->is64 ? read64le(buf + 8) : read32le(buf + 4);
198
}
199
}
200
201
void RISCV::writeGotHeader(uint8_t *buf) const {
202
if (config->is64)
203
write64le(buf, mainPart->dynamic->getVA());
204
else
205
write32le(buf, mainPart->dynamic->getVA());
206
}
207
208
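// A lazily bound .got.plt entry initially points at the start of .plt, so the
// first call through the PLT enters the header stub (see writePltHeader
// below), which jumps to _dl_runtime_resolve.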
void RISCV::writeGotPlt(uint8_t *buf, const Symbol &s) const {
209
if (config->is64)
210
write64le(buf, in.plt->getVA());
211
else
212
write32le(buf, in.plt->getVA());
213
}
214
215
void RISCV::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
216
if (config->writeAddends) {
217
if (config->is64)
218
write64le(buf, s.getVA());
219
else
220
write32le(buf, s.getVA());
221
}
222
}
223
224
void RISCV::writePltHeader(uint8_t *buf) const {
225
// 1: auipc t2, %pcrel_hi(.got.plt)
226
// sub t1, t1, t3
227
// l[wd] t3, %pcrel_lo(1b)(t2); t3 = _dl_runtime_resolve
228
// addi t1, t1, -pltHeaderSize-12; t1 = &.plt[i] - &.plt[0]
229
// addi t0, t2, %pcrel_lo(1b)
230
// srli t1, t1, (rv64?1:2); t1 = &.got.plt[i] - &.got.plt[0]
231
// l[wd] t0, Wordsize(t0); t0 = link_map
232
// jr t3
233
uint32_t offset = in.gotPlt->getVA() - in.plt->getVA();
234
uint32_t load = config->is64 ? LD : LW;
235
write32le(buf + 0, utype(AUIPC, X_T2, hi20(offset)));
236
write32le(buf + 4, rtype(SUB, X_T1, X_T1, X_T3));
237
write32le(buf + 8, itype(load, X_T3, X_T2, lo12(offset)));
238
write32le(buf + 12, itype(ADDI, X_T1, X_T1, -target->pltHeaderSize - 12));
239
write32le(buf + 16, itype(ADDI, X_T0, X_T2, lo12(offset)));
240
write32le(buf + 20, itype(SRLI, X_T1, X_T1, config->is64 ? 1 : 2));
241
write32le(buf + 24, itype(load, X_T0, X_T0, config->wordsize));
242
write32le(buf + 28, itype(JALR, 0, X_T3, 0));
243
}
244
245
void RISCV::writePlt(uint8_t *buf, const Symbol &sym,
246
uint64_t pltEntryAddr) const {
247
// 1: auipc t3, %pcrel_hi(f@.got.plt)
248
// l[wd] t3, %pcrel_lo(1b)(t3)
249
// jalr t1, t3
250
// nop
251
uint32_t offset = sym.getGotPltVA() - pltEntryAddr;
252
write32le(buf + 0, utype(AUIPC, X_T3, hi20(offset)));
253
write32le(buf + 4, itype(config->is64 ? LD : LW, X_T3, X_T3, lo12(offset)));
254
write32le(buf + 8, itype(JALR, X_T1, X_T3, 0));
255
write32le(buf + 12, itype(ADDI, 0, 0, 0));
256
}
257
258
RelType RISCV::getDynRel(RelType type) const {
259
return type == target->symbolicRel ? type
260
: static_cast<RelType>(R_RISCV_NONE);
261
}
262
263
RelExpr RISCV::getRelExpr(const RelType type, const Symbol &s,
264
const uint8_t *loc) const {
265
switch (type) {
266
case R_RISCV_NONE:
267
return R_NONE;
268
case R_RISCV_32:
269
case R_RISCV_64:
270
case R_RISCV_HI20:
271
case R_RISCV_LO12_I:
272
case R_RISCV_LO12_S:
273
case R_RISCV_RVC_LUI:
274
return R_ABS;
275
case R_RISCV_ADD8:
276
case R_RISCV_ADD16:
277
case R_RISCV_ADD32:
278
case R_RISCV_ADD64:
279
case R_RISCV_SET6:
280
case R_RISCV_SET8:
281
case R_RISCV_SET16:
282
case R_RISCV_SET32:
283
case R_RISCV_SUB6:
284
case R_RISCV_SUB8:
285
case R_RISCV_SUB16:
286
case R_RISCV_SUB32:
287
case R_RISCV_SUB64:
288
return R_RISCV_ADD;
289
case R_RISCV_JAL:
290
case R_RISCV_BRANCH:
291
case R_RISCV_PCREL_HI20:
292
case R_RISCV_RVC_BRANCH:
293
case R_RISCV_RVC_JUMP:
294
case R_RISCV_32_PCREL:
295
return R_PC;
296
case R_RISCV_CALL:
297
case R_RISCV_CALL_PLT:
298
case R_RISCV_PLT32:
299
return R_PLT_PC;
300
case R_RISCV_GOT_HI20:
301
case R_RISCV_GOT32_PCREL:
302
return R_GOT_PC;
303
case R_RISCV_PCREL_LO12_I:
304
case R_RISCV_PCREL_LO12_S:
305
return R_RISCV_PC_INDIRECT;
306
case R_RISCV_TLSDESC_HI20:
307
case R_RISCV_TLSDESC_LOAD_LO12:
308
case R_RISCV_TLSDESC_ADD_LO12:
309
return R_TLSDESC_PC;
310
case R_RISCV_TLSDESC_CALL:
311
return R_TLSDESC_CALL;
312
case R_RISCV_TLS_GD_HI20:
313
return R_TLSGD_PC;
314
case R_RISCV_TLS_GOT_HI20:
315
return R_GOT_PC;
316
case R_RISCV_TPREL_HI20:
317
case R_RISCV_TPREL_LO12_I:
318
case R_RISCV_TPREL_LO12_S:
319
return R_TPREL;
320
case R_RISCV_ALIGN:
321
return R_RELAX_HINT;
322
case R_RISCV_TPREL_ADD:
323
case R_RISCV_RELAX:
324
return config->relax ? R_RELAX_HINT : R_NONE;
325
case R_RISCV_SET_ULEB128:
326
case R_RISCV_SUB_ULEB128:
327
return R_RISCV_LEB128;
328
default:
329
error(getErrorLocation(loc) + "unknown relocation (" + Twine(type) +
330
") against symbol " + toString(s));
331
return R_NONE;
332
}
333
}
334
335
void RISCV::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
336
const unsigned bits = config->wordsize * 8;
337
338
switch (rel.type) {
339
case R_RISCV_32:
340
write32le(loc, val);
341
return;
342
case R_RISCV_64:
343
write64le(loc, val);
344
return;
345
346
case R_RISCV_RVC_BRANCH: {
347
checkInt(loc, val, 9, rel);
348
checkAlignment(loc, val, 2, rel);
349
uint16_t insn = read16le(loc) & 0xE383;
350
uint16_t imm8 = extractBits(val, 8, 8) << 12;
351
uint16_t imm4_3 = extractBits(val, 4, 3) << 10;
352
uint16_t imm7_6 = extractBits(val, 7, 6) << 5;
353
uint16_t imm2_1 = extractBits(val, 2, 1) << 3;
354
uint16_t imm5 = extractBits(val, 5, 5) << 2;
355
insn |= imm8 | imm4_3 | imm7_6 | imm2_1 | imm5;
356
357
write16le(loc, insn);
358
return;
359
}
360
361
case R_RISCV_RVC_JUMP: {
362
checkInt(loc, val, 12, rel);
363
checkAlignment(loc, val, 2, rel);
364
uint16_t insn = read16le(loc) & 0xE003;
365
uint16_t imm11 = extractBits(val, 11, 11) << 12;
366
uint16_t imm4 = extractBits(val, 4, 4) << 11;
367
uint16_t imm9_8 = extractBits(val, 9, 8) << 9;
368
uint16_t imm10 = extractBits(val, 10, 10) << 8;
369
uint16_t imm6 = extractBits(val, 6, 6) << 7;
370
uint16_t imm7 = extractBits(val, 7, 7) << 6;
371
uint16_t imm3_1 = extractBits(val, 3, 1) << 3;
372
uint16_t imm5 = extractBits(val, 5, 5) << 2;
373
insn |= imm11 | imm4 | imm9_8 | imm10 | imm6 | imm7 | imm3_1 | imm5;
374
375
write16le(loc, insn);
376
return;
377
}
378
379
case R_RISCV_RVC_LUI: {
380
int64_t imm = SignExtend64(val + 0x800, bits) >> 12;
381
checkInt(loc, imm, 6, rel);
382
if (imm == 0) { // `c.lui rd, 0` is illegal, convert to `c.li rd, 0`
383
write16le(loc, (read16le(loc) & 0x0F83) | 0x4000);
384
} else {
385
uint16_t imm17 = extractBits(val + 0x800, 17, 17) << 12;
386
uint16_t imm16_12 = extractBits(val + 0x800, 16, 12) << 2;
387
write16le(loc, (read16le(loc) & 0xEF83) | imm17 | imm16_12);
388
}
389
return;
390
}
391
392
case R_RISCV_JAL: {
393
checkInt(loc, val, 21, rel);
394
checkAlignment(loc, val, 2, rel);
395
396
uint32_t insn = read32le(loc) & 0xFFF;
397
uint32_t imm20 = extractBits(val, 20, 20) << 31;
398
uint32_t imm10_1 = extractBits(val, 10, 1) << 21;
399
uint32_t imm11 = extractBits(val, 11, 11) << 20;
400
uint32_t imm19_12 = extractBits(val, 19, 12) << 12;
401
insn |= imm20 | imm10_1 | imm11 | imm19_12;
402
403
write32le(loc, insn);
404
return;
405
}
406
407
case R_RISCV_BRANCH: {
408
checkInt(loc, val, 13, rel);
409
checkAlignment(loc, val, 2, rel);
410
411
uint32_t insn = read32le(loc) & 0x1FFF07F;
412
uint32_t imm12 = extractBits(val, 12, 12) << 31;
413
uint32_t imm10_5 = extractBits(val, 10, 5) << 25;
414
uint32_t imm4_1 = extractBits(val, 4, 1) << 8;
415
uint32_t imm11 = extractBits(val, 11, 11) << 7;
416
insn |= imm12 | imm10_5 | imm4_1 | imm11;
417
418
write32le(loc, insn);
419
return;
420
}
421
422
// auipc + jalr pair
423
case R_RISCV_CALL:
424
case R_RISCV_CALL_PLT: {
425
int64_t hi = SignExtend64(val + 0x800, bits) >> 12;
426
checkInt(loc, hi, 20, rel);
427
if (isInt<20>(hi)) {
428
relocateNoSym(loc, R_RISCV_PCREL_HI20, val);
429
relocateNoSym(loc + 4, R_RISCV_PCREL_LO12_I, val);
430
}
431
return;
432
}
433
434
case R_RISCV_GOT_HI20:
435
case R_RISCV_PCREL_HI20:
436
case R_RISCV_TLSDESC_HI20:
437
case R_RISCV_TLS_GD_HI20:
438
case R_RISCV_TLS_GOT_HI20:
439
case R_RISCV_TPREL_HI20:
440
case R_RISCV_HI20: {
441
uint64_t hi = val + 0x800;
442
checkInt(loc, SignExtend64(hi, bits) >> 12, 20, rel);
443
write32le(loc, (read32le(loc) & 0xFFF) | (hi & 0xFFFFF000));
444
return;
445
}
446
447
case R_RISCV_PCREL_LO12_I:
448
case R_RISCV_TLSDESC_LOAD_LO12:
449
case R_RISCV_TLSDESC_ADD_LO12:
450
case R_RISCV_TPREL_LO12_I:
451
case R_RISCV_LO12_I: {
452
uint64_t hi = (val + 0x800) >> 12;
453
uint64_t lo = val - (hi << 12);
454
write32le(loc, setLO12_I(read32le(loc), lo & 0xfff));
455
return;
456
}
457
458
case R_RISCV_PCREL_LO12_S:
459
case R_RISCV_TPREL_LO12_S:
460
case R_RISCV_LO12_S: {
461
uint64_t hi = (val + 0x800) >> 12;
462
uint64_t lo = val - (hi << 12);
463
write32le(loc, setLO12_S(read32le(loc), lo));
464
return;
465
}
466
467
case INTERNAL_R_RISCV_GPREL_I:
468
case INTERNAL_R_RISCV_GPREL_S: {
469
Defined *gp = ElfSym::riscvGlobalPointer;
470
int64_t displace = SignExtend64(val - gp->getVA(), bits);
471
checkInt(loc, displace, 12, rel);
472
uint32_t insn = (read32le(loc) & ~(31 << 15)) | (X_GP << 15);
473
if (rel.type == INTERNAL_R_RISCV_GPREL_I)
474
insn = setLO12_I(insn, displace);
475
else
476
insn = setLO12_S(insn, displace);
477
write32le(loc, insn);
478
return;
479
}
480
481
case R_RISCV_ADD8:
482
*loc += val;
483
return;
484
case R_RISCV_ADD16:
485
write16le(loc, read16le(loc) + val);
486
return;
487
case R_RISCV_ADD32:
488
write32le(loc, read32le(loc) + val);
489
return;
490
case R_RISCV_ADD64:
491
write64le(loc, read64le(loc) + val);
492
return;
493
case R_RISCV_SUB6:
494
*loc = (*loc & 0xc0) | (((*loc & 0x3f) - val) & 0x3f);
495
return;
496
case R_RISCV_SUB8:
497
*loc -= val;
498
return;
499
case R_RISCV_SUB16:
500
write16le(loc, read16le(loc) - val);
501
return;
502
case R_RISCV_SUB32:
503
write32le(loc, read32le(loc) - val);
504
return;
505
case R_RISCV_SUB64:
506
write64le(loc, read64le(loc) - val);
507
return;
508
case R_RISCV_SET6:
509
*loc = (*loc & 0xc0) | (val & 0x3f);
510
return;
511
case R_RISCV_SET8:
512
*loc = val;
513
return;
514
case R_RISCV_SET16:
515
write16le(loc, val);
516
return;
517
case R_RISCV_SET32:
518
case R_RISCV_32_PCREL:
519
case R_RISCV_PLT32:
520
case R_RISCV_GOT32_PCREL:
521
checkInt(loc, val, 32, rel);
522
write32le(loc, val);
523
return;
524
525
case R_RISCV_TLS_DTPREL32:
526
write32le(loc, val - dtpOffset);
527
break;
528
case R_RISCV_TLS_DTPREL64:
529
write64le(loc, val - dtpOffset);
530
break;
531
532
case R_RISCV_RELAX:
533
return;
534
case R_RISCV_TLSDESC:
535
// The addend is stored in the second word.
536
if (config->is64)
537
write64le(loc + 8, val);
538
else
539
write32le(loc + 4, val);
540
break;
541
default:
542
llvm_unreachable("unknown relocation");
543
}
544
}
545
546
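// A relocation can be relaxed only when the assembler emitted a paired
// R_RISCV_RELAX immediately after it for the same instruction.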
static bool relaxable(ArrayRef<Relocation> relocs, size_t i) {
547
return i + 1 != relocs.size() && relocs[i + 1].type == R_RISCV_RELAX;
548
}
549
550
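// The general-dynamic TLSDESC code sequence has the form
//   label: auipc a0, %tlsdesc_hi(sym)              # R_RISCV_TLSDESC_HI20
//          l[wd] t0, %tlsdesc_load_lo(label)(a0)   # R_RISCV_TLSDESC_LOAD_LO12
//          addi  a0, a0, %tlsdesc_add_lo(label)    # R_RISCV_TLSDESC_ADD_LO12
//          jalr  t0, t0, %tlsdesc_call(label)      # R_RISCV_TLSDESC_CALL
// tlsdescToIe rewrites it to load the TP-relative offset from the GOT
// (auipc+l[wd] into a0); tlsdescToLe materializes the offset directly.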
static void tlsdescToIe(uint8_t *loc, const Relocation &rel, uint64_t val) {
551
switch (rel.type) {
552
case R_RISCV_TLSDESC_HI20:
553
case R_RISCV_TLSDESC_LOAD_LO12:
554
write32le(loc, 0x00000013); // nop
555
break;
556
case R_RISCV_TLSDESC_ADD_LO12:
557
write32le(loc, utype(AUIPC, X_A0, hi20(val))); // auipc a0,<hi20>
558
break;
559
case R_RISCV_TLSDESC_CALL:
560
if (config->is64)
561
write32le(loc, itype(LD, X_A0, X_A0, lo12(val))); // ld a0,<lo12>(a0)
562
else
563
write32le(loc, itype(LW, X_A0, X_A0, lo12(val))); // lw a0,<lo12>(a0)
564
break;
565
default:
566
llvm_unreachable("unsupported relocation for TLSDESC to IE");
567
}
568
}
569
570
static void tlsdescToLe(uint8_t *loc, const Relocation &rel, uint64_t val) {
571
switch (rel.type) {
572
case R_RISCV_TLSDESC_HI20:
573
case R_RISCV_TLSDESC_LOAD_LO12:
574
write32le(loc, 0x00000013); // nop
575
return;
576
case R_RISCV_TLSDESC_ADD_LO12:
577
if (isInt<12>(val))
578
write32le(loc, 0x00000013); // nop
579
else
580
write32le(loc, utype(LUI, X_A0, hi20(val))); // lui a0,<hi20>
581
return;
582
case R_RISCV_TLSDESC_CALL:
583
if (isInt<12>(val))
584
write32le(loc, itype(ADDI, X_A0, 0, val)); // addi a0,zero,<lo12>
585
else
586
write32le(loc, itype(ADDI, X_A0, X_A0, lo12(val))); // addi a0,a0,<lo12>
587
return;
588
default:
589
llvm_unreachable("unsupported relocation for TLSDESC to LE");
590
}
591
}
592
593
void RISCV::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {
594
uint64_t secAddr = sec.getOutputSection()->addr;
595
if (auto *s = dyn_cast<InputSection>(&sec))
596
secAddr += s->outSecOff;
597
else if (auto *ehIn = dyn_cast<EhInputSection>(&sec))
598
secAddr += ehIn->getParent()->outSecOff;
599
uint64_t tlsdescVal = 0;
600
bool tlsdescRelax = false, isToLe = false;
601
const ArrayRef<Relocation> relocs = sec.relocs();
602
for (size_t i = 0, size = relocs.size(); i != size; ++i) {
603
const Relocation &rel = relocs[i];
604
uint8_t *loc = buf + rel.offset;
605
uint64_t val =
606
sec.getRelocTargetVA(sec.file, rel.type, rel.addend,
607
secAddr + rel.offset, *rel.sym, rel.expr);
608
609
switch (rel.expr) {
610
case R_RELAX_HINT:
611
continue;
612
case R_TLSDESC_PC:
613
// For R_RISCV_TLSDESC_HI20, store &got(sym)-PC to be used by the
614
// following two instructions L[DW] and ADDI.
615
if (rel.type == R_RISCV_TLSDESC_HI20)
616
tlsdescVal = val;
617
else
618
val = tlsdescVal;
619
break;
620
case R_RELAX_TLS_GD_TO_IE:
621
// Only R_RISCV_TLSDESC_HI20 reaches here. tlsdescVal will be finalized
622
// after we see R_RISCV_TLSDESC_ADD_LO12 in the R_RELAX_TLS_GD_TO_LE case.
623
// The net effect is that tlsdescVal will be smaller than `val` to take
624
// into account NOP instructions (in the absence of R_RISCV_RELAX)
625
// before AUIPC.
626
tlsdescVal = val + rel.offset;
627
isToLe = false;
628
tlsdescRelax = relaxable(relocs, i);
629
if (!tlsdescRelax)
630
tlsdescToIe(loc, rel, val);
631
continue;
632
case R_RELAX_TLS_GD_TO_LE:
633
// See the comment in handleTlsRelocation. For TLSDESC=>IE,
634
// R_RISCV_TLSDESC_{LOAD_LO12,ADD_LO12,CALL} also reach here. If isToLe is
635
// false, this is actually TLSDESC=>IE optimization.
636
if (rel.type == R_RISCV_TLSDESC_HI20) {
637
tlsdescVal = val;
638
isToLe = true;
639
tlsdescRelax = relaxable(relocs, i);
640
} else {
641
if (!isToLe && rel.type == R_RISCV_TLSDESC_ADD_LO12)
642
tlsdescVal -= rel.offset;
643
val = tlsdescVal;
644
}
645
// When NOP conversion is eligible and relaxation applies, don't write a
646
// NOP in case an unrelated instruction follows the current instruction.
647
if (tlsdescRelax &&
648
(rel.type == R_RISCV_TLSDESC_HI20 ||
649
rel.type == R_RISCV_TLSDESC_LOAD_LO12 ||
650
(rel.type == R_RISCV_TLSDESC_ADD_LO12 && isToLe && !hi20(val))))
651
continue;
652
if (isToLe)
653
tlsdescToLe(loc, rel, val);
654
else
655
tlsdescToIe(loc, rel, val);
656
continue;
657
case R_RISCV_LEB128:
658
if (i + 1 < size) {
659
const Relocation &rel1 = relocs[i + 1];
660
if (rel.type == R_RISCV_SET_ULEB128 &&
661
rel1.type == R_RISCV_SUB_ULEB128 && rel.offset == rel1.offset) {
662
auto val = rel.sym->getVA(rel.addend) - rel1.sym->getVA(rel1.addend);
663
if (overwriteULEB128(loc, val) >= 0x80)
664
errorOrWarn(sec.getLocation(rel.offset) + ": ULEB128 value " +
665
Twine(val) + " exceeds available space; references '" +
666
lld::toString(*rel.sym) + "'");
667
++i;
668
continue;
669
}
670
}
671
errorOrWarn(sec.getLocation(rel.offset) +
672
": R_RISCV_SET_ULEB128 not paired with R_RISCV_SUB_SET128");
673
return;
674
default:
675
break;
676
}
677
relocate(loc, rel, val);
678
}
679
}
680
681
void elf::initSymbolAnchors() {
682
SmallVector<InputSection *, 0> storage;
683
for (OutputSection *osec : outputSections) {
684
if (!(osec->flags & SHF_EXECINSTR))
685
continue;
686
for (InputSection *sec : getInputSections(*osec, storage)) {
687
sec->relaxAux = make<RelaxAux>();
688
if (sec->relocs().size()) {
689
sec->relaxAux->relocDeltas =
690
std::make_unique<uint32_t[]>(sec->relocs().size());
691
sec->relaxAux->relocTypes =
692
std::make_unique<RelType[]>(sec->relocs().size());
693
}
694
}
695
}
696
// Store anchors (st_value and st_value+st_size) for symbols relative to text
697
// sections.
698
//
699
// For a defined symbol foo, we may have `d->file != file` with --wrap=foo.
700
// We should process foo, as the defining object file's symbol table may not
701
// contain foo after redirectSymbols changed the foo entry to __wrap_foo. To
702
// avoid adding a Defined that is undefined in one object file, use
703
// `!d->scriptDefined` to exclude symbols that are definitely not wrapped.
704
//
705
// `relaxAux->anchors` may contain duplicate symbols, but that is fine.
706
for (InputFile *file : ctx.objectFiles)
707
for (Symbol *sym : file->getSymbols()) {
708
auto *d = dyn_cast<Defined>(sym);
709
if (!d || (d->file != file && !d->scriptDefined))
710
continue;
711
if (auto *sec = dyn_cast_or_null<InputSection>(d->section))
712
if (sec->flags & SHF_EXECINSTR && sec->relaxAux) {
713
// If sec is discarded, relaxAux will be nullptr.
714
sec->relaxAux->anchors.push_back({d->value, d, false});
715
sec->relaxAux->anchors.push_back({d->value + d->size, d, true});
716
}
717
}
718
// Sort anchors by offset so that we can find the closest relocation
719
// efficiently. For a zero size symbol, ensure that its start anchor precedes
720
// its end anchor. For two symbols with anchors at the same offset, their
721
// order does not matter.
722
for (OutputSection *osec : outputSections) {
723
if (!(osec->flags & SHF_EXECINSTR))
724
continue;
725
for (InputSection *sec : getInputSections(*osec, storage)) {
726
llvm::sort(sec->relaxAux->anchors, [](auto &a, auto &b) {
727
return std::make_pair(a.offset, a.end) <
728
std::make_pair(b.offset, b.end);
729
});
730
}
731
}
732
}
733
734
// Relax R_RISCV_CALL/R_RISCV_CALL_PLT auipc+jalr to c.j, c.jal, or jal.
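// c.j/c.jal reach +-2 KiB (12-bit immediate, RVC only; c.jal exists only on
// RV32), and jal reaches +-1 MiB (21-bit immediate).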
735
static void relaxCall(const InputSection &sec, size_t i, uint64_t loc,
736
Relocation &r, uint32_t &remove) {
737
const bool rvc = getEFlags(sec.file) & EF_RISCV_RVC;
738
const Symbol &sym = *r.sym;
739
const uint64_t insnPair = read64le(sec.content().data() + r.offset);
740
const uint32_t rd = extractBits(insnPair, 32 + 11, 32 + 7);
741
const uint64_t dest =
742
(r.expr == R_PLT_PC ? sym.getPltVA() : sym.getVA()) + r.addend;
743
const int64_t displace = dest - loc;
744
745
if (rvc && isInt<12>(displace) && rd == 0) {
746
sec.relaxAux->relocTypes[i] = R_RISCV_RVC_JUMP;
747
sec.relaxAux->writes.push_back(0xa001); // c.j
748
remove = 6;
749
} else if (rvc && isInt<12>(displace) && rd == X_RA &&
750
!config->is64) { // RV32C only
751
sec.relaxAux->relocTypes[i] = R_RISCV_RVC_JUMP;
752
sec.relaxAux->writes.push_back(0x2001); // c.jal
753
remove = 6;
754
} else if (isInt<21>(displace)) {
755
sec.relaxAux->relocTypes[i] = R_RISCV_JAL;
756
sec.relaxAux->writes.push_back(0x6f | rd << 7); // jal
757
remove = 4;
758
}
759
}
760
761
// Relax local-exec TLS when hi20 is zero.
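// If the TP-relative offset fits in a signed 12-bit immediate, the
// lui/add-with-tp pair is redundant and the lo12 instruction can use tp as its
// base register directly.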
762
static void relaxTlsLe(const InputSection &sec, size_t i, uint64_t loc,
763
Relocation &r, uint32_t &remove) {
764
uint64_t val = r.sym->getVA(r.addend);
765
if (hi20(val) != 0)
766
return;
767
uint32_t insn = read32le(sec.content().data() + r.offset);
768
switch (r.type) {
769
case R_RISCV_TPREL_HI20:
770
case R_RISCV_TPREL_ADD:
771
// Remove lui rd, %tprel_hi(x) and add rd, rd, tp, %tprel_add(x).
772
sec.relaxAux->relocTypes[i] = R_RISCV_RELAX;
773
remove = 4;
774
break;
775
case R_RISCV_TPREL_LO12_I:
776
// addi rd, rd, %tprel_lo(x) => addi rd, tp, st_value(x)
777
sec.relaxAux->relocTypes[i] = R_RISCV_32;
778
insn = (insn & ~(31 << 15)) | (X_TP << 15);
779
sec.relaxAux->writes.push_back(setLO12_I(insn, val));
780
break;
781
case R_RISCV_TPREL_LO12_S:
782
// sw rs, %tprel_lo(x)(rd) => sw rs, st_value(x)(rd)
783
sec.relaxAux->relocTypes[i] = R_RISCV_32;
784
insn = (insn & ~(31 << 15)) | (X_TP << 15);
785
sec.relaxAux->writes.push_back(setLO12_S(insn, val));
786
break;
787
}
788
}
789
790
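// Relax absolute lui+{addi,load,store} sequences into gp-relative accesses
// when the target lies within +-2 KiB of the global pointer
// (__global_pointer$): the lui is dropped and the low-part instruction is
// rebased onto gp via INTERNAL_R_RISCV_GPREL_{I,S}.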
static void relaxHi20Lo12(const InputSection &sec, size_t i, uint64_t loc,
791
Relocation &r, uint32_t &remove) {
792
const Defined *gp = ElfSym::riscvGlobalPointer;
793
if (!gp)
794
return;
795
796
if (!isInt<12>(r.sym->getVA(r.addend) - gp->getVA()))
797
return;
798
799
switch (r.type) {
800
case R_RISCV_HI20:
801
// Remove lui rd, %hi20(x).
802
sec.relaxAux->relocTypes[i] = R_RISCV_RELAX;
803
remove = 4;
804
break;
805
case R_RISCV_LO12_I:
806
sec.relaxAux->relocTypes[i] = INTERNAL_R_RISCV_GPREL_I;
807
break;
808
case R_RISCV_LO12_S:
809
sec.relaxAux->relocTypes[i] = INTERNAL_R_RISCV_GPREL_S;
810
break;
811
}
812
}
813
814
static bool relax(InputSection &sec) {
815
const uint64_t secAddr = sec.getVA();
816
const MutableArrayRef<Relocation> relocs = sec.relocs();
817
auto &aux = *sec.relaxAux;
818
bool changed = false;
819
ArrayRef<SymbolAnchor> sa = ArrayRef(aux.anchors);
820
uint64_t delta = 0;
821
bool tlsdescRelax = false, toLeShortForm = false;
822
823
std::fill_n(aux.relocTypes.get(), relocs.size(), R_RISCV_NONE);
824
aux.writes.clear();
825
for (auto [i, r] : llvm::enumerate(relocs)) {
826
const uint64_t loc = secAddr + r.offset - delta;
827
uint32_t &cur = aux.relocDeltas[i], remove = 0;
828
switch (r.type) {
829
case R_RISCV_ALIGN: {
830
const uint64_t nextLoc = loc + r.addend;
831
const uint64_t align = PowerOf2Ceil(r.addend + 2);
832
// All bytes beyond the alignment boundary should be removed.
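// E.g. with r.addend = 6 (up to 6 bytes of NOPs for 8-byte alignment) and
// loc % 8 == 4, align is 8 and remove is 2 (one c.nop of the padding is
// dropped).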
833
remove = nextLoc - ((loc + align - 1) & -align);
834
// If we can't satisfy this alignment, we've found a bad input.
835
if (LLVM_UNLIKELY(static_cast<int32_t>(remove) < 0)) {
836
errorOrWarn(getErrorLocation((const uint8_t*)loc) +
837
"insufficient padding bytes for " + lld::toString(r.type) +
838
": " + Twine(r.addend) + " bytes available "
839
"for requested alignment of " + Twine(align) + " bytes");
840
remove = 0;
841
}
842
break;
843
}
844
case R_RISCV_CALL:
845
case R_RISCV_CALL_PLT:
846
if (relaxable(relocs, i))
847
relaxCall(sec, i, loc, r, remove);
848
break;
849
case R_RISCV_TPREL_HI20:
850
case R_RISCV_TPREL_ADD:
851
case R_RISCV_TPREL_LO12_I:
852
case R_RISCV_TPREL_LO12_S:
853
if (relaxable(relocs, i))
854
relaxTlsLe(sec, i, loc, r, remove);
855
break;
856
case R_RISCV_HI20:
857
case R_RISCV_LO12_I:
858
case R_RISCV_LO12_S:
859
if (relaxable(relocs, i))
860
relaxHi20Lo12(sec, i, loc, r, remove);
861
break;
862
case R_RISCV_TLSDESC_HI20:
863
// For TLSDESC=>LE, we can use the short form if hi20 is zero.
864
tlsdescRelax = relaxable(relocs, i);
865
toLeShortForm = tlsdescRelax && r.expr == R_RELAX_TLS_GD_TO_LE &&
866
!hi20(r.sym->getVA(r.addend));
867
[[fallthrough]];
868
case R_RISCV_TLSDESC_LOAD_LO12:
869
// For TLSDESC=>LE/IE, AUIPC and L[DW] are removed if relaxable.
870
if (tlsdescRelax && r.expr != R_TLSDESC_PC)
871
remove = 4;
872
break;
873
case R_RISCV_TLSDESC_ADD_LO12:
874
if (toLeShortForm)
875
remove = 4;
876
break;
877
}
878
879
// For all anchors whose offsets are <= r.offset, they are preceded by
880
// the previous relocation whose `relocDeltas` value equals `delta`.
881
// Decrease their st_value and update their st_size.
882
for (; sa.size() && sa[0].offset <= r.offset; sa = sa.slice(1)) {
883
if (sa[0].end)
884
sa[0].d->size = sa[0].offset - delta - sa[0].d->value;
885
else
886
sa[0].d->value = sa[0].offset - delta;
887
}
888
delta += remove;
889
if (delta != cur) {
890
cur = delta;
891
changed = true;
892
}
893
}
894
895
for (const SymbolAnchor &a : sa) {
896
if (a.end)
897
a.d->size = a.offset - delta - a.d->value;
898
else
899
a.d->value = a.offset - delta;
900
}
901
// Inform assignAddresses that the size has changed.
902
if (!isUInt<32>(delta))
903
fatal("section size decrease is too large: " + Twine(delta));
904
sec.bytesDropped = delta;
905
return changed;
906
}
907
908
// When relaxing just R_RISCV_ALIGN, relocDeltas is usually changed only once in
909
// the absence of a linker script. For call and load/store R_RISCV_RELAX, code
910
// shrinkage may reduce displacement and make more relocations eligible for
911
// relaxation. Code shrinkage may increase displacement to a call/load/store
912
// target at a higher fixed address, invalidating an earlier relaxation. Any
913
// change in section sizes can have a cascading effect and require another
914
// relaxation pass.
915
bool RISCV::relaxOnce(int pass) const {
916
llvm::TimeTraceScope timeScope("RISC-V relaxOnce");
917
if (config->relocatable)
918
return false;
919
920
if (pass == 0)
921
initSymbolAnchors();
922
923
SmallVector<InputSection *, 0> storage;
924
bool changed = false;
925
for (OutputSection *osec : outputSections) {
926
if (!(osec->flags & SHF_EXECINSTR))
927
continue;
928
for (InputSection *sec : getInputSections(*osec, storage))
929
changed |= relax(*sec);
930
}
931
return changed;
932
}
933
934
void RISCV::finalizeRelax(int passes) const {
935
llvm::TimeTraceScope timeScope("Finalize RISC-V relaxation");
936
log("relaxation passes: " + Twine(passes));
937
SmallVector<InputSection *, 0> storage;
938
for (OutputSection *osec : outputSections) {
939
if (!(osec->flags & SHF_EXECINSTR))
940
continue;
941
for (InputSection *sec : getInputSections(*osec, storage)) {
942
RelaxAux &aux = *sec->relaxAux;
943
if (!aux.relocDeltas)
944
continue;
945
946
MutableArrayRef<Relocation> rels = sec->relocs();
947
ArrayRef<uint8_t> old = sec->content();
948
size_t newSize = old.size() - aux.relocDeltas[rels.size() - 1];
949
size_t writesIdx = 0;
950
uint8_t *p = context().bAlloc.Allocate<uint8_t>(newSize);
951
uint64_t offset = 0;
952
int64_t delta = 0;
953
sec->content_ = p;
954
sec->size = newSize;
955
sec->bytesDropped = 0;
956
957
// Update section content: remove NOPs for R_RISCV_ALIGN and rewrite
958
// instructions for relaxed relocations.
959
for (size_t i = 0, e = rels.size(); i != e; ++i) {
960
uint32_t remove = aux.relocDeltas[i] - delta;
961
delta = aux.relocDeltas[i];
962
if (remove == 0 && aux.relocTypes[i] == R_RISCV_NONE)
963
continue;
964
965
// Copy from last location to the current relocated location.
966
const Relocation &r = rels[i];
967
uint64_t size = r.offset - offset;
968
memcpy(p, old.data() + offset, size);
969
p += size;
970
971
// For R_RISCV_ALIGN, we will place `offset` in a location (among NOPs)
972
// to satisfy the alignment requirement. If both `remove` and r.addend
973
// are multiples of 4, it is as if we have skipped some NOPs. Otherwise
974
// we are in the middle of a 4-byte NOP, and we need to rewrite the NOP
975
// sequence.
976
int64_t skip = 0;
977
if (r.type == R_RISCV_ALIGN) {
978
if (remove % 4 || r.addend % 4) {
979
skip = r.addend - remove;
980
int64_t j = 0;
981
for (; j + 4 <= skip; j += 4)
982
write32le(p + j, 0x00000013); // nop
983
if (j != skip) {
984
assert(j + 2 == skip);
985
write16le(p + j, 0x0001); // c.nop
986
}
987
}
988
} else if (RelType newType = aux.relocTypes[i]) {
989
switch (newType) {
990
case INTERNAL_R_RISCV_GPREL_I:
991
case INTERNAL_R_RISCV_GPREL_S:
992
break;
993
case R_RISCV_RELAX:
994
// Used by relaxTlsLe to indicate the relocation is ignored.
995
break;
996
case R_RISCV_RVC_JUMP:
997
skip = 2;
998
write16le(p, aux.writes[writesIdx++]);
999
break;
1000
case R_RISCV_JAL:
1001
skip = 4;
1002
write32le(p, aux.writes[writesIdx++]);
1003
break;
1004
case R_RISCV_32:
1005
// Used by relaxTlsLe to write a uint32_t then suppress the handling
1006
// in relocateAlloc.
1007
skip = 4;
1008
write32le(p, aux.writes[writesIdx++]);
1009
aux.relocTypes[i] = R_RISCV_NONE;
1010
break;
1011
default:
1012
llvm_unreachable("unsupported type");
1013
}
1014
}
1015
1016
p += skip;
1017
offset = r.offset + skip + remove;
1018
}
1019
memcpy(p, old.data() + offset, old.size() - offset);
1020
1021
// Subtract the previous relocDeltas value from the relocation offset.
1022
// For a pair of R_RISCV_CALL/R_RISCV_RELAX with the same offset, decrease
1023
// their r_offset by the same delta.
1024
delta = 0;
1025
for (size_t i = 0, e = rels.size(); i != e;) {
1026
uint64_t cur = rels[i].offset;
1027
do {
1028
rels[i].offset -= delta;
1029
if (aux.relocTypes[i] != R_RISCV_NONE)
1030
rels[i].type = aux.relocTypes[i];
1031
} while (++i != e && rels[i].offset == cur);
1032
delta = aux.relocDeltas[i - 1];
1033
}
1034
}
1035
}
1036
}
1037
1038
namespace {
1039
// Representation of the merged .riscv.attributes input sections. The psABI
1040
// specifies merge policy for attributes. E.g. if we link an object without an
1041
// extension with an object with the extension, the output Tag_RISCV_arch shall
1042
// contain the extension. Some tools like objdump parse .riscv.attributes and
1043
// disable some instructions if the first Tag_RISCV_arch does not contain an
1044
// extension.
1045
class RISCVAttributesSection final : public SyntheticSection {
1046
public:
1047
RISCVAttributesSection()
1048
: SyntheticSection(0, SHT_RISCV_ATTRIBUTES, 1, ".riscv.attributes") {}
1049
1050
size_t getSize() const override { return size; }
1051
void writeTo(uint8_t *buf) override;
1052
1053
static constexpr StringRef vendor = "riscv";
1054
DenseMap<unsigned, unsigned> intAttr;
1055
DenseMap<unsigned, StringRef> strAttr;
1056
size_t size = 0;
1057
};
1058
} // namespace
1059
1060
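// Merge Tag_RISCV_arch strings by taking the union of extensions, keeping the
// higher version of any extension present in both. E.g. merging
// "rv32i2p1_m2p0" with "rv32i2p1_c2p0" yields an arch containing i, m and c.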
static void mergeArch(RISCVISAUtils::OrderedExtensionMap &mergedExts,
1061
unsigned &mergedXlen, const InputSectionBase *sec,
1062
StringRef s) {
1063
auto maybeInfo = RISCVISAInfo::parseNormalizedArchString(s);
1064
if (!maybeInfo) {
1065
errorOrWarn(toString(sec) + ": " + s + ": " +
1066
llvm::toString(maybeInfo.takeError()));
1067
return;
1068
}
1069
1070
// Merge extensions.
1071
RISCVISAInfo &info = **maybeInfo;
1072
if (mergedExts.empty()) {
1073
mergedExts = info.getExtensions();
1074
mergedXlen = info.getXLen();
1075
} else {
1076
for (const auto &ext : info.getExtensions()) {
1077
auto p = mergedExts.insert(ext);
1078
if (!p.second) {
1079
if (std::tie(p.first->second.Major, p.first->second.Minor) <
1080
std::tie(ext.second.Major, ext.second.Minor))
1081
p.first->second = ext.second;
1082
}
1083
}
1084
}
1085
}
1086
1087
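// Merge Tag_RISCV_atomic_abi values: UNKNOWN is compatible with everything,
// A6S combines with A6C to A6C and with A7 to A7, while A6C and A7 are
// mutually incompatible and are reported as an error.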
static void mergeAtomic(DenseMap<unsigned, unsigned>::iterator it,
1088
const InputSectionBase *oldSection,
1089
const InputSectionBase *newSection,
1090
RISCVAttrs::RISCVAtomicAbiTag oldTag,
1091
RISCVAttrs::RISCVAtomicAbiTag newTag) {
1092
using RISCVAttrs::RISCVAtomicAbiTag;
1093
// Same tags stay the same, and UNKNOWN is compatible with anything
1094
if (oldTag == newTag || newTag == RISCVAtomicAbiTag::UNKNOWN)
1095
return;
1096
1097
auto reportAbiError = [&]() {
1098
errorOrWarn("atomic abi mismatch for " + oldSection->name + "\n>>> " +
1099
toString(oldSection) +
1100
": atomic_abi=" + Twine(static_cast<unsigned>(oldTag)) +
1101
"\n>>> " + toString(newSection) +
1102
": atomic_abi=" + Twine(static_cast<unsigned>(newTag)));
1103
};
1104
1105
auto reportUnknownAbiError = [](const InputSectionBase *section,
1106
RISCVAtomicAbiTag tag) {
1107
switch (tag) {
1108
case RISCVAtomicAbiTag::UNKNOWN:
1109
case RISCVAtomicAbiTag::A6C:
1110
case RISCVAtomicAbiTag::A6S:
1111
case RISCVAtomicAbiTag::A7:
1112
return;
1113
};
1114
errorOrWarn("unknown atomic abi for " + section->name + "\n>>> " +
1115
toString(section) +
1116
": atomic_abi=" + Twine(static_cast<unsigned>(tag)));
1117
};
1118
switch (oldTag) {
1119
case RISCVAtomicAbiTag::UNKNOWN:
1120
it->getSecond() = static_cast<unsigned>(newTag);
1121
return;
1122
case RISCVAtomicAbiTag::A6C:
1123
switch (newTag) {
1124
case RISCVAtomicAbiTag::A6S:
1125
it->getSecond() = static_cast<unsigned>(RISCVAtomicAbiTag::A6C);
1126
return;
1127
case RISCVAtomicAbiTag::A7:
1128
reportAbiError();
1129
return;
1130
case RISCVAttrs::RISCVAtomicAbiTag::UNKNOWN:
1131
case RISCVAttrs::RISCVAtomicAbiTag::A6C:
1132
return;
1133
};
1134
break;
1135
1136
case RISCVAtomicAbiTag::A6S:
1137
switch (newTag) {
1138
case RISCVAtomicAbiTag::A6C:
1139
it->getSecond() = static_cast<unsigned>(RISCVAtomicAbiTag::A6C);
1140
return;
1141
case RISCVAtomicAbiTag::A7:
1142
it->getSecond() = static_cast<unsigned>(RISCVAtomicAbiTag::A7);
1143
return;
1144
case RISCVAttrs::RISCVAtomicAbiTag::UNKNOWN:
1145
case RISCVAttrs::RISCVAtomicAbiTag::A6S:
1146
return;
1147
};
1148
break;
1149
1150
case RISCVAtomicAbiTag::A7:
1151
switch (newTag) {
1152
case RISCVAtomicAbiTag::A6S:
1153
it->getSecond() = static_cast<unsigned>(RISCVAtomicAbiTag::A7);
1154
return;
1155
case RISCVAtomicAbiTag::A6C:
1156
reportAbiError();
1157
return;
1158
case RISCVAttrs::RISCVAtomicAbiTag::UNKNOWN:
1159
case RISCVAttrs::RISCVAtomicAbiTag::A7:
1160
return;
1161
};
1162
break;
1163
};
1164
1165
// If we get here, then we have an invalid tag, so report it.
1166
// Putting these checks at the end allows us to only do these checks when we
1167
// need to, since this is expected to be a rare occurrence.
1168
reportUnknownAbiError(oldSection, oldTag);
1169
reportUnknownAbiError(newSection, newTag);
1170
}
1171
1172
static RISCVAttributesSection *
1173
mergeAttributesSection(const SmallVector<InputSectionBase *, 0> &sections) {
1174
using RISCVAttrs::RISCVAtomicAbiTag;
1175
RISCVISAUtils::OrderedExtensionMap exts;
1176
const InputSectionBase *firstStackAlign = nullptr;
1177
const InputSectionBase *firstAtomicAbi = nullptr;
1178
unsigned firstStackAlignValue = 0, xlen = 0;
1179
bool hasArch = false;
1180
1181
in.riscvAttributes = std::make_unique<RISCVAttributesSection>();
1182
auto &merged = static_cast<RISCVAttributesSection &>(*in.riscvAttributes);
1183
1184
// Collect all tag values from the attributes sections.
1185
const auto &attributesTags = RISCVAttrs::getRISCVAttributeTags();
1186
for (const InputSectionBase *sec : sections) {
1187
RISCVAttributeParser parser;
1188
if (Error e = parser.parse(sec->content(), llvm::endianness::little))
1189
warn(toString(sec) + ": " + llvm::toString(std::move(e)));
1190
for (const auto &tag : attributesTags) {
1191
switch (RISCVAttrs::AttrType(tag.attr)) {
1192
// Integer attributes.
1193
case RISCVAttrs::STACK_ALIGN:
1194
if (auto i = parser.getAttributeValue(tag.attr)) {
1195
auto r = merged.intAttr.try_emplace(tag.attr, *i);
1196
if (r.second) {
1197
firstStackAlign = sec;
1198
firstStackAlignValue = *i;
1199
} else if (r.first->second != *i) {
1200
errorOrWarn(toString(sec) + " has stack_align=" + Twine(*i) +
1201
" but " + toString(firstStackAlign) +
1202
" has stack_align=" + Twine(firstStackAlignValue));
1203
}
1204
}
1205
continue;
1206
case RISCVAttrs::UNALIGNED_ACCESS:
1207
if (auto i = parser.getAttributeValue(tag.attr))
1208
merged.intAttr[tag.attr] |= *i;
1209
continue;
1210
1211
// String attributes.
1212
case RISCVAttrs::ARCH:
1213
if (auto s = parser.getAttributeString(tag.attr)) {
1214
hasArch = true;
1215
mergeArch(exts, xlen, sec, *s);
1216
}
1217
continue;
1218
1219
// Attributes which use the default handling.
1220
case RISCVAttrs::PRIV_SPEC:
1221
case RISCVAttrs::PRIV_SPEC_MINOR:
1222
case RISCVAttrs::PRIV_SPEC_REVISION:
1223
break;
1224
1225
case RISCVAttrs::AttrType::ATOMIC_ABI:
1226
if (auto i = parser.getAttributeValue(tag.attr)) {
1227
auto r = merged.intAttr.try_emplace(tag.attr, *i);
1228
if (r.second)
1229
firstAtomicAbi = sec;
1230
else
1231
mergeAtomic(r.first, firstAtomicAbi, sec,
1232
static_cast<RISCVAtomicAbiTag>(r.first->getSecond()),
1233
static_cast<RISCVAtomicAbiTag>(*i));
1234
}
1235
continue;
1236
}
1237
1238
// Fallback for deprecated priv_spec* and other unknown attributes: retain
1239
// the attribute if all input sections agree on the value. GNU ld uses 0
1240
// and empty strings as default values which are not dumped to the output.
1241
// TODO Adjust after resolution to
1242
// https://github.com/riscv-non-isa/riscv-elf-psabi-doc/issues/352
1243
if (tag.attr % 2 == 0) {
1244
if (auto i = parser.getAttributeValue(tag.attr)) {
1245
auto r = merged.intAttr.try_emplace(tag.attr, *i);
1246
if (!r.second && r.first->second != *i)
1247
r.first->second = 0;
1248
}
1249
} else if (auto s = parser.getAttributeString(tag.attr)) {
1250
auto r = merged.strAttr.try_emplace(tag.attr, *s);
1251
if (!r.second && r.first->second != *s)
1252
r.first->second = {};
1253
}
1254
}
1255
}
1256
1257
if (hasArch && xlen != 0) {
1258
if (auto result = RISCVISAInfo::createFromExtMap(xlen, exts)) {
1259
merged.strAttr.try_emplace(RISCVAttrs::ARCH,
1260
saver().save((*result)->toString()));
1261
} else {
1262
errorOrWarn(llvm::toString(result.takeError()));
1263
}
1264
}
1265
1266
// The total size of the headers: 1-byte format-version + 4-byte
1267
// <section-length> + NUL-terminated "vendor-name" + 1-byte <file-tag> +
// 4-byte <size>.
1268
size_t size = 5 + merged.vendor.size() + 1 + 5;
1269
for (auto &attr : merged.intAttr)
1270
if (attr.second != 0)
1271
size += getULEB128Size(attr.first) + getULEB128Size(attr.second);
1272
for (auto &attr : merged.strAttr)
1273
if (!attr.second.empty())
1274
size += getULEB128Size(attr.first) + attr.second.size() + 1;
1275
merged.size = size;
1276
return &merged;
1277
}
1278
1279
void RISCVAttributesSection::writeTo(uint8_t *buf) {
1280
const size_t size = getSize();
1281
uint8_t *const end = buf + size;
1282
*buf = ELFAttrs::Format_Version;
1283
write32(buf + 1, size - 1);
1284
buf += 5;
1285
1286
memcpy(buf, vendor.data(), vendor.size());
1287
buf += vendor.size() + 1;
1288
1289
*buf = ELFAttrs::File;
1290
write32(buf + 1, end - buf);
1291
buf += 5;
1292
1293
for (auto &attr : intAttr) {
1294
if (attr.second == 0)
1295
continue;
1296
buf += encodeULEB128(attr.first, buf);
1297
buf += encodeULEB128(attr.second, buf);
1298
}
1299
for (auto &attr : strAttr) {
1300
if (attr.second.empty())
1301
continue;
1302
buf += encodeULEB128(attr.first, buf);
1303
memcpy(buf, attr.second.data(), attr.second.size());
1304
buf += attr.second.size() + 1;
1305
}
1306
}
1307
1308
void elf::mergeRISCVAttributesSections() {
1309
// Find the first input SHT_RISCV_ATTRIBUTES; return if not found.
1310
size_t place =
1311
llvm::find_if(ctx.inputSections,
1312
[](auto *s) { return s->type == SHT_RISCV_ATTRIBUTES; }) -
1313
ctx.inputSections.begin();
1314
if (place == ctx.inputSections.size())
1315
return;
1316
1317
// Extract all SHT_RISCV_ATTRIBUTES sections into `sections`.
1318
SmallVector<InputSectionBase *, 0> sections;
1319
llvm::erase_if(ctx.inputSections, [&](InputSectionBase *s) {
1320
if (s->type != SHT_RISCV_ATTRIBUTES)
1321
return false;
1322
sections.push_back(s);
1323
return true;
1324
});
1325
1326
// Add the merged section.
1327
ctx.inputSections.insert(ctx.inputSections.begin() + place,
1328
mergeAttributesSection(sections));
1329
}
1330
1331
TargetInfo *elf::getRISCVTargetInfo() {
1332
static RISCV target;
1333
return &target;
1334
}
1335
1336