GitHub Repository: torvalds/linux
Path: blob/master/tools/objtool/check.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2017 Josh Poimboeuf <[email protected]>
 */

#include <string.h>
#include <stdlib.h>
#include <inttypes.h>
#include <sys/mman.h>

#include <objtool/builtin.h>
#include <objtool/cfi.h>
#include <objtool/arch.h>
#include <objtool/check.h>
#include <objtool/special.h>
#include <objtool/warn.h>
#include <objtool/endianness.h>

#include <linux/objtool_types.h>
#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/static_call_types.h>
#include <linux/string.h>

struct alternative {
	struct alternative *next;
	struct instruction *insn;
};

static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;

static struct cfi_init_state initial_func_cfi;
static struct cfi_state init_cfi;
static struct cfi_state func_cfi;
static struct cfi_state force_undefined_cfi;

struct instruction *find_insn(struct objtool_file *file,
			      struct section *sec, unsigned long offset)
{
	struct instruction *insn;

	hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
		if (insn->sec == sec && insn->offset == offset)
			return insn;
	}

	return NULL;
}

struct instruction *next_insn_same_sec(struct objtool_file *file,
				       struct instruction *insn)
{
	if (insn->idx == INSN_CHUNK_MAX)
		return find_insn(file, insn->sec, insn->offset + insn->len);

	insn++;
	if (!insn->len)
		return NULL;

	return insn;
}

static struct instruction *next_insn_same_func(struct objtool_file *file,
					       struct instruction *insn)
{
	struct instruction *next = next_insn_same_sec(file, insn);
	struct symbol *func = insn_func(insn);

	if (!func)
		return NULL;

	if (next && insn_func(next) == func)
		return next;

	/* Check if we're already in the subfunction: */
	if (func == func->cfunc)
		return NULL;

	/* Move to the subfunction: */
	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
}

static struct instruction *prev_insn_same_sec(struct objtool_file *file,
					      struct instruction *insn)
{
	if (insn->idx == 0) {
		if (insn->prev_len)
			return find_insn(file, insn->sec, insn->offset - insn->prev_len);
		return NULL;
	}

	return insn - 1;
}

static struct instruction *prev_insn_same_sym(struct objtool_file *file,
					      struct instruction *insn)
{
	struct instruction *prev = prev_insn_same_sec(file, insn);

	if (prev && insn_func(prev) == insn_func(insn))
		return prev;

	return NULL;
}

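/*
 * The dummy '__fake' loop below runs exactly once: it only provides a
 * scope in which '__sec' can be declared, so the macro stays usable as
 * a single statement.
 */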
#define for_each_insn(file, insn)					\
	for (struct section *__sec, *__fake = (struct section *)1;	\
	     __fake; __fake = NULL)					\
		for_each_sec(file, __sec)				\
			sec_for_each_insn(file, __sec, insn)

#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))

#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && insn->offset < sym->offset + sym->len;		\
	     insn = next_insn_same_sec(file, insn))

#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = prev_insn_same_sec(file, insn);			\
	     insn && insn->offset >= sym->offset;			\
	     insn = prev_insn_same_sec(file, insn))

#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))

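/*
 * A dynamic jump/call has no direct destination symbol, and only a
 * dynamic jump/call can own a jump table, so the accessors below gate
 * on insn->type to keep the two roles apart.
 */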
static inline struct symbol *insn_call_dest(struct instruction *insn)
{
	if (insn->type == INSN_JUMP_DYNAMIC ||
	    insn->type == INSN_CALL_DYNAMIC)
		return NULL;

	return insn->_call_dest;
}

static inline struct reloc *insn_jump_table(struct instruction *insn)
{
	if (insn->type == INSN_JUMP_DYNAMIC ||
	    insn->type == INSN_CALL_DYNAMIC)
		return insn->_jump_table;

	return NULL;
}

static inline unsigned long insn_jump_table_size(struct instruction *insn)
{
	if (insn->type == INSN_JUMP_DYNAMIC ||
	    insn->type == INSN_CALL_DYNAMIC)
		return insn->_jump_table_size;

	return 0;
}

static bool is_jump_table_jump(struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	if (insn_jump_table(insn))
		return true;

	/* Retpoline alternative for a jump table? */
	return alt_group && alt_group->orig_group &&
	       insn_jump_table(alt_group->orig_group->first_insn);
}

static bool is_sibling_call(struct instruction *insn)
{
	/*
	 * Assume only STT_FUNC calls have jump-tables.
	 */
	if (insn_func(insn)) {
		/* An indirect jump is either a sibling call or a jump to a table. */
		if (insn->type == INSN_JUMP_DYNAMIC)
			return !is_jump_table_jump(insn);
	}

	/* add_jump_destinations() sets insn_call_dest(insn) for sibling calls. */
	return (is_static_jump(insn) && insn_call_dest(insn));
}

/*
 * Check whether string @s ends with @sub.
 */
static bool str_ends_with(const char *s, const char *sub)
{
	const int slen = strlen(s);
	const int sublen = strlen(sub);

	if (sublen > slen)
		return false;

	return !memcmp(s + slen - sublen, sub, sublen);
}

/*
 * Checks if a function is a Rust "noreturn" one.
 */
static bool is_rust_noreturn(const struct symbol *func)
{
	/*
	 * If it does not start with "_R", then it is not a Rust symbol.
	 */
	if (strncmp(func->name, "_R", 2))
		return false;

	/*
	 * These are just heuristics -- we do not control the precise symbol
	 * name, due to the crate disambiguators (which depend on the compiler)
	 * as well as changes to the source code itself between versions (since
	 * these come from the Rust standard library).
	 */
	return str_ends_with(func->name, "_4core5sliceSp15copy_from_slice17len_mismatch_fail") ||
	       str_ends_with(func->name, "_4core6option13unwrap_failed") ||
	       str_ends_with(func->name, "_4core6result13unwrap_failed") ||
	       str_ends_with(func->name, "_4core9panicking5panic") ||
	       str_ends_with(func->name, "_4core9panicking9panic_fmt") ||
	       str_ends_with(func->name, "_4core9panicking14panic_explicit") ||
	       str_ends_with(func->name, "_4core9panicking14panic_nounwind") ||
	       str_ends_with(func->name, "_4core9panicking18panic_bounds_check") ||
	       str_ends_with(func->name, "_4core9panicking18panic_nounwind_fmt") ||
	       str_ends_with(func->name, "_4core9panicking19assert_failed_inner") ||
	       str_ends_with(func->name, "_4core9panicking30panic_null_pointer_dereference") ||
	       str_ends_with(func->name, "_4core9panicking36panic_misaligned_pointer_dereference") ||
	       str_ends_with(func->name, "_7___rustc17rust_begin_unwind") ||
	       strstr(func->name, "_4core9panicking13assert_failed") ||
	       strstr(func->name, "_4core9panicking11panic_const24panic_const_") ||
	       (strstr(func->name, "_4core5slice5index") &&
		strstr(func->name, "slice_") &&
		str_ends_with(func->name, "_fail"));
}

/*
 * Check whether the given function is a "noreturn" function.
 *
 * For global functions which are outside the scope of this object file, we
 * have to keep a manual list of them.
 *
 * For local functions, we detect them by looking for the lack of a return
 * instruction.
 */
static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
				int recursion)
{
	int i;
	struct instruction *insn;
	bool empty = true;

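	/*
	 * X-macro trick: noreturns.h is a list of NORETURN(func) entries,
	 * so defining NORETURN() as a stringifier expands the header into
	 * an array of function-name strings.
	 */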
#define NORETURN(func) __stringify(func),
	static const char * const global_noreturns[] = {
#include "noreturns.h"
	};
#undef NORETURN

	if (!func)
		return false;

	if (func->bind == STB_GLOBAL || func->bind == STB_WEAK) {
		if (is_rust_noreturn(func))
			return true;

		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
			if (!strcmp(func->name, global_noreturns[i]))
				return true;
	}

	if (func->bind == STB_WEAK)
		return false;

	if (!func->len)
		return false;

	insn = find_insn(file, func->sec, func->offset);
	if (!insn || !insn_func(insn))
		return false;

	func_for_each_insn(file, func, insn) {
		empty = false;

		if (insn->type == INSN_RETURN)
			return false;
	}

	if (empty)
		return false;

	/*
	 * A function can have a sibling call instead of a return.  In that
	 * case, the function's dead-end status depends on whether the target
	 * of the sibling call returns.
	 */
	func_for_each_insn(file, func, insn) {
		if (is_sibling_call(insn)) {
			struct instruction *dest = insn->jump_dest;

			if (!dest)
				/* sibling call to another file */
				return false;

			/* local sibling call */
			if (recursion == 5) {
				/*
				 * Infinite recursion: two functions have
				 * sibling calls to each other.  This is a very
				 * rare case.  It means they aren't dead ends.
				 */
				return false;
			}

			return __dead_end_function(file, insn_func(dest), recursion+1);
		}
	}

	return true;
}

static bool dead_end_function(struct objtool_file *file, struct symbol *func)
{
	return __dead_end_function(file, func, 0);
}

static void init_cfi_state(struct cfi_state *cfi)
{
	int i;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		cfi->regs[i].base = CFI_UNDEFINED;
		cfi->vals[i].base = CFI_UNDEFINED;
	}
	cfi->cfa.base = CFI_UNDEFINED;
	cfi->drap_reg = CFI_UNDEFINED;
	cfi->drap_offset = -1;
}

static void init_insn_state(struct objtool_file *file, struct insn_state *state,
			    struct section *sec)
{
	memset(state, 0, sizeof(*state));
	init_cfi_state(&state->cfi);

	if (opts.noinstr && sec)
		state->noinstr = sec->noinstr;
}

static struct cfi_state *cfi_alloc(void)
{
	struct cfi_state *cfi = calloc(1, sizeof(struct cfi_state));
	if (!cfi) {
		ERROR_GLIBC("calloc");
		exit(1);
	}
	nr_cfi++;
	return cfi;
}

static int cfi_bits;
static struct hlist_head *cfi_hash;

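/*
 * Note that both helpers below skip the leading 'hash' member: two
 * cfi_state objects compare and hash as equal based purely on the
 * unwind data that follows the hlist_node.
 */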
static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2)
{
	return memcmp((void *)cfi1 + sizeof(cfi1->hash),
		      (void *)cfi2 + sizeof(cfi2->hash),
		      sizeof(struct cfi_state) - sizeof(struct hlist_node));
}

static inline u32 cfi_key(struct cfi_state *cfi)
{
	return jhash((void *)cfi + sizeof(cfi->hash),
		     sizeof(*cfi) - sizeof(cfi->hash), 0);
}

static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi)
{
	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
	struct cfi_state *obj;

	hlist_for_each_entry(obj, head, hash) {
		if (!cficmp(cfi, obj)) {
			nr_cfi_cache++;
			return obj;
		}
	}

	obj = cfi_alloc();
	*obj = *cfi;
	hlist_add_head(&obj->hash, head);

	return obj;
}

static void cfi_hash_add(struct cfi_state *cfi)
{
	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];

	hlist_add_head(&cfi->hash, head);
}

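/*
 * At least 2^10 hash buckets; beyond that, scale with the expected
 * number of CFI states (ilog2(size)).  On mmap() failure this returns
 * NULL for the caller to handle.
 */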
static void *cfi_hash_alloc(unsigned long size)
{
	cfi_bits = max(10, ilog2(size));
	cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits,
			PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANON, -1, 0);
	if (cfi_hash == (void *)-1L) {
		ERROR_GLIBC("mmap fail cfi_hash");
		cfi_hash = NULL;
	} else if (opts.stats) {
		printf("cfi_bits: %d\n", cfi_bits);
	}

	return cfi_hash;
}

static unsigned long nr_insns;
static unsigned long nr_insns_visited;

/*
 * Call the arch-specific instruction decoder for all the instructions and add
 * them to the global instruction list.
 */
static int decode_instructions(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	unsigned long offset;
	struct instruction *insn;
	int ret;

	for_each_sec(file, sec) {
		struct instruction *insns = NULL;
		u8 prev_len = 0;
		u8 idx = 0;

		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
			continue;

		if (strcmp(sec->name, ".altinstr_replacement") &&
		    strcmp(sec->name, ".altinstr_aux") &&
		    strncmp(sec->name, ".discard.", 9))
			sec->text = true;

		if (!strcmp(sec->name, ".noinstr.text") ||
		    !strcmp(sec->name, ".entry.text") ||
		    !strcmp(sec->name, ".cpuidle.text") ||
		    !strncmp(sec->name, ".text..__x86.", 13))
			sec->noinstr = true;

		/*
		 * .init.text code is run before userspace and thus doesn't
		 * strictly need retpolines, except for modules, which are
		 * loaded late and very much do need retpolines in their
		 * .init.text.
		 */
		if (!strcmp(sec->name, ".init.text") && !opts.module)
			sec->init = true;

		for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
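			/*
			 * Instructions live in fixed-size chunks; insn->idx
			 * plus len/prev_len let next/prev_insn_same_sec()
			 * step within a chunk by pointer arithmetic, falling
			 * back to the hash lookup only at chunk boundaries.
			 */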
			if (!insns || idx == INSN_CHUNK_MAX) {
				insns = calloc(sizeof(*insn), INSN_CHUNK_SIZE);
				if (!insns) {
					ERROR_GLIBC("calloc");
					return -1;
				}
				idx = 0;
			} else {
				idx++;
			}
			insn = &insns[idx];
			insn->idx = idx;

			INIT_LIST_HEAD(&insn->call_node);
			insn->sec = sec;
			insn->offset = offset;
			insn->prev_len = prev_len;

			ret = arch_decode_instruction(file, sec, offset,
						      sec->sh.sh_size - offset,
						      insn);
			if (ret)
				return ret;

			prev_len = insn->len;

			/*
			 * By default, "ud2" is a dead end unless otherwise
			 * annotated, because GCC 7 inserts it for certain
			 * divide-by-zero cases.
			 */
			if (insn->type == INSN_BUG)
				insn->dead_end = true;

			hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
			nr_insns++;
		}

		sec_for_each_sym(sec, func) {
			if (func->type != STT_NOTYPE && func->type != STT_FUNC)
				continue;

			if (func->offset == sec->sh.sh_size) {
				/* Heuristic: likely an "end" symbol */
				if (func->type == STT_NOTYPE)
					continue;
				ERROR("%s(): STT_FUNC at end of section", func->name);
				return -1;
			}

			if (func->embedded_insn || func->alias != func)
				continue;

			if (!find_insn(file, sec, func->offset)) {
				ERROR("%s(): can't find starting instruction", func->name);
				return -1;
			}

			sym_for_each_insn(file, func, insn) {
				insn->sym = func;
				if (func->type == STT_FUNC &&
				    insn->type == INSN_ENDBR &&
				    list_empty(&insn->call_node)) {
					if (insn->offset == func->offset) {
						list_add_tail(&insn->call_node, &file->endbr_list);
						file->nr_endbr++;
					} else {
						file->nr_endbr_int++;
					}
				}
			}
		}
	}

	if (opts.stats)
		printf("nr_insns: %lu\n", nr_insns);

	return 0;
}

/*
 * Read the pv_ops[] .data table to find the statically initialized values.
 */
static int add_pv_ops(struct objtool_file *file, const char *symname)
{
	struct symbol *sym, *func;
	unsigned long off, end;
	struct reloc *reloc;
	int idx;

	sym = find_symbol_by_name(file->elf, symname);
	if (!sym)
		return 0;

	off = sym->offset;
	end = off + sym->len;
	for (;;) {
		reloc = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off);
		if (!reloc)
			break;

		idx = (reloc_offset(reloc) - sym->offset) / sizeof(unsigned long);

		func = reloc->sym;
		if (func->type == STT_SECTION)
			func = find_symbol_by_offset(reloc->sym->sec,
						     reloc_addend(reloc));
		if (!func) {
			ERROR_FUNC(reloc->sym->sec, reloc_addend(reloc),
				   "can't find func at %s[%d]", symname, idx);
			return -1;
		}

		if (objtool_pv_add(file, idx, func))
			return -1;

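		/* Resume the search just past the current reloc. */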
		off = reloc_offset(reloc) + 1;
		if (off > end)
			break;
	}

	return 0;
}

/*
 * Allocate and initialize file->pv_ops[].
 */
static int init_pv_ops(struct objtool_file *file)
{
	static const char *pv_ops_tables[] = {
		"pv_ops",
		"xen_cpu_ops",
		"xen_irq_ops",
		"xen_mmu_ops",
		NULL,
	};
	const char *pv_ops;
	struct symbol *sym;
	int idx, nr, ret;

	if (!opts.noinstr)
		return 0;

	file->pv_ops = NULL;

	sym = find_symbol_by_name(file->elf, "pv_ops");
	if (!sym)
		return 0;

	nr = sym->len / sizeof(unsigned long);
	file->pv_ops = calloc(sizeof(struct pv_state), nr);
	if (!file->pv_ops) {
		ERROR_GLIBC("calloc");
		return -1;
	}

	for (idx = 0; idx < nr; idx++)
		INIT_LIST_HEAD(&file->pv_ops[idx].targets);

	for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++) {
		ret = add_pv_ops(file, pv_ops);
		if (ret)
			return ret;
	}

	return 0;
}

static int create_static_call_sections(struct objtool_file *file)
{
	struct static_call_site *site;
	struct section *sec;
	struct instruction *insn;
	struct symbol *key_sym;
	char *key_name, *tmp;
	int idx;

	sec = find_section_by_name(file->elf, ".static_call_sites");
	if (sec) {
		INIT_LIST_HEAD(&file->static_call_list);
		WARN("file already has .static_call_sites section, skipping");
		return 0;
	}

	if (list_empty(&file->static_call_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node)
		idx++;

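	/* Two relocs per site: one for 'addr', one for 'key'. */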
	sec = elf_create_section_pair(file->elf, ".static_call_sites",
				      sizeof(*site), idx, idx * 2);
	if (!sec)
		return -1;

	/* Allow modules to modify the low bits of static_call_site::key */
	sec->sh.sh_flags |= SHF_WRITE;

	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node) {

		/* populate reloc for 'addr' */
		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(*site), idx * 2,
					     insn->sec, insn->offset))
			return -1;

		/* find key symbol */
		key_name = strdup(insn_call_dest(insn)->name);
		if (!key_name) {
			ERROR_GLIBC("strdup");
			return -1;
		}
		if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
			    STATIC_CALL_TRAMP_PREFIX_LEN)) {
			ERROR("static_call: trampoline name malformed: %s", key_name);
			return -1;
		}
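		/*
		 * Derive the key name from the trampoline name in place:
		 * keep the common tail, overwrite the prefix.
		 */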
		tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
		memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);

		key_sym = find_symbol_by_name(file->elf, tmp);
		if (!key_sym) {
			if (!opts.module) {
				ERROR("static_call: can't find static_call_key symbol: %s", tmp);
				return -1;
			}

			/*
			 * For modules, the key might not be exported, which
			 * means the module can make static calls but isn't
			 * allowed to change them.
			 *
			 * In that case we temporarily set the key to be the
			 * trampoline address.  This is fixed up in
			 * static_call_add_module().
			 */
			key_sym = insn_call_dest(insn);
		}

		/* populate reloc for 'key' */
		if (!elf_init_reloc_data_sym(file->elf, sec,
					     idx * sizeof(*site) + 4,
					     (idx * 2) + 1, key_sym,
					     is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
			return -1;

		idx++;
	}

	return 0;
}

static int create_retpoline_sites_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".retpoline_sites");
	if (sec) {
		WARN("file already has .retpoline_sites, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->retpoline_call_list, call_node)
		idx++;

	if (!idx)
		return 0;

	sec = elf_create_section_pair(file->elf, ".retpoline_sites",
				      sizeof(int), idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->retpoline_call_list, call_node) {

		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(int), idx,
					     insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}

static int create_return_sites_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".return_sites");
	if (sec) {
		WARN("file already has .return_sites, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->return_thunk_list, call_node)
		idx++;

	if (!idx)
		return 0;

	sec = elf_create_section_pair(file->elf, ".return_sites",
				      sizeof(int), idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->return_thunk_list, call_node) {

		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(int), idx,
					     insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}

static int create_ibt_endbr_seal_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".ibt_endbr_seal");
	if (sec) {
		WARN("file already has .ibt_endbr_seal, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node)
		idx++;

	if (opts.stats) {
		printf("ibt: ENDBR at function start: %d\n", file->nr_endbr);
		printf("ibt: ENDBR inside functions: %d\n", file->nr_endbr_int);
		printf("ibt: superfluous ENDBR: %d\n", idx);
	}

	if (!idx)
		return 0;

	sec = elf_create_section_pair(file->elf, ".ibt_endbr_seal",
				      sizeof(int), idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node) {

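		/*
		 * The entry itself stays zero; the reloc emitted below
		 * supplies the actual address.
		 */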
		int *site = (int *)sec->data->d_buf + idx;
		struct symbol *sym = insn->sym;
		*site = 0;

		if (opts.module && sym && sym->type == STT_FUNC &&
		    insn->offset == sym->offset &&
		    (!strcmp(sym->name, "init_module") ||
		     !strcmp(sym->name, "cleanup_module"))) {
			ERROR("%s(): Magic init_module() function name is deprecated, use module_init(fn) instead",
			      sym->name);
			return -1;
		}

		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(int), idx,
					     insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}

static int create_cfi_sections(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *sym;
	int idx;

	sec = find_section_by_name(file->elf, ".cfi_sites");
	if (sec) {
		INIT_LIST_HEAD(&file->call_list);
		WARN("file already has .cfi_sites section, skipping");
		return 0;
	}

	idx = 0;
	for_each_sym(file, sym) {
		if (sym->type != STT_FUNC)
			continue;

		if (strncmp(sym->name, "__cfi_", 6))
			continue;

		idx++;
	}

	sec = elf_create_section_pair(file->elf, ".cfi_sites",
				      sizeof(unsigned int), idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	for_each_sym(file, sym) {
		if (sym->type != STT_FUNC)
			continue;

		if (strncmp(sym->name, "__cfi_", 6))
			continue;

		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(unsigned int), idx,
					     sym->sec, sym->offset))
			return -1;

		idx++;
	}

	return 0;
}

static int create_mcount_loc_sections(struct objtool_file *file)
{
	size_t addr_size = elf_addr_size(file->elf);
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, "__mcount_loc");
	if (sec) {
		INIT_LIST_HEAD(&file->mcount_loc_list);
		WARN("file already has __mcount_loc section, skipping");
		return 0;
	}

	if (list_empty(&file->mcount_loc_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node)
		idx++;

	sec = elf_create_section_pair(file->elf, "__mcount_loc", addr_size,
				      idx, idx);
	if (!sec)
		return -1;

	sec->sh.sh_addralign = addr_size;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node) {

		struct reloc *reloc;

		reloc = elf_init_reloc_text_sym(file->elf, sec, idx * addr_size, idx,
						insn->sec, insn->offset);
		if (!reloc)
			return -1;

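		/*
		 * __mcount_loc holds absolute addresses, so pick an R_ABS
		 * reloc type matching the target's pointer size.
		 */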
		set_reloc_type(file->elf, reloc, addr_size == 8 ? R_ABS64 : R_ABS32);

		idx++;
	}

	return 0;
}

static int create_direct_call_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".call_sites");
	if (sec) {
		INIT_LIST_HEAD(&file->call_list);
		WARN("file already has .call_sites section, skipping");
		return 0;
	}

	if (list_empty(&file->call_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->call_list, call_node)
		idx++;

	sec = elf_create_section_pair(file->elf, ".call_sites",
				      sizeof(unsigned int), idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->call_list, call_node) {

		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(unsigned int), idx,
					     insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}

/*
 * Warnings shouldn't be reported for ignored functions.
 */
static int add_ignores(struct objtool_file *file)
{
	struct section *rsec;
	struct symbol *func;
	struct reloc *reloc;

	rsec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
	if (!rsec)
		return 0;

	for_each_reloc(rsec, reloc) {
		switch (reloc->sym->type) {
		case STT_FUNC:
			func = reloc->sym;
			break;

		case STT_SECTION:
			func = find_func_by_offset(reloc->sym->sec, reloc_addend(reloc));
			if (!func)
				continue;
			break;

		default:
			ERROR("unexpected relocation symbol type in %s: %d",
			      rsec->name, reloc->sym->type);
			return -1;
		}

		func->ignore = true;
		if (func->cfunc)
			func->cfunc->ignore = true;
	}

	return 0;
}

/*
 * This is a whitelist of functions that are allowed to be called with AC set.
 * The list is meant to be minimal and only contains compiler instrumentation
 * ABI and a few functions used to implement *_{to,from}_user() functions.
 *
 * These functions must not directly change AC, but may PUSHF/POPF.
 */
static const char *uaccess_safe_builtin[] = {
	/* KASAN */
	"kasan_report",
	"kasan_check_range",
	/* KASAN out-of-line */
	"__asan_loadN_noabort",
	"__asan_load1_noabort",
	"__asan_load2_noabort",
	"__asan_load4_noabort",
	"__asan_load8_noabort",
	"__asan_load16_noabort",
	"__asan_storeN_noabort",
	"__asan_store1_noabort",
	"__asan_store2_noabort",
	"__asan_store4_noabort",
	"__asan_store8_noabort",
	"__asan_store16_noabort",
	"__kasan_check_read",
	"__kasan_check_write",
	/* KASAN in-line */
	"__asan_report_load_n_noabort",
	"__asan_report_load1_noabort",
	"__asan_report_load2_noabort",
	"__asan_report_load4_noabort",
	"__asan_report_load8_noabort",
	"__asan_report_load16_noabort",
	"__asan_report_store_n_noabort",
	"__asan_report_store1_noabort",
	"__asan_report_store2_noabort",
	"__asan_report_store4_noabort",
	"__asan_report_store8_noabort",
	"__asan_report_store16_noabort",
	/* KCSAN */
	"__kcsan_check_access",
	"__kcsan_mb",
	"__kcsan_wmb",
	"__kcsan_rmb",
	"__kcsan_release",
	"kcsan_found_watchpoint",
	"kcsan_setup_watchpoint",
	"kcsan_check_scoped_accesses",
	"kcsan_disable_current",
	"kcsan_enable_current_nowarn",
	/* KCSAN/TSAN */
	"__tsan_func_entry",
	"__tsan_func_exit",
	"__tsan_read_range",
	"__tsan_write_range",
	"__tsan_read1",
	"__tsan_read2",
	"__tsan_read4",
	"__tsan_read8",
	"__tsan_read16",
	"__tsan_write1",
	"__tsan_write2",
	"__tsan_write4",
	"__tsan_write8",
	"__tsan_write16",
	"__tsan_read_write1",
	"__tsan_read_write2",
	"__tsan_read_write4",
	"__tsan_read_write8",
	"__tsan_read_write16",
	"__tsan_volatile_read1",
	"__tsan_volatile_read2",
	"__tsan_volatile_read4",
	"__tsan_volatile_read8",
	"__tsan_volatile_read16",
	"__tsan_volatile_write1",
	"__tsan_volatile_write2",
	"__tsan_volatile_write4",
	"__tsan_volatile_write8",
	"__tsan_volatile_write16",
	"__tsan_atomic8_load",
	"__tsan_atomic16_load",
	"__tsan_atomic32_load",
	"__tsan_atomic64_load",
	"__tsan_atomic8_store",
	"__tsan_atomic16_store",
	"__tsan_atomic32_store",
	"__tsan_atomic64_store",
	"__tsan_atomic8_exchange",
	"__tsan_atomic16_exchange",
	"__tsan_atomic32_exchange",
	"__tsan_atomic64_exchange",
	"__tsan_atomic8_fetch_add",
	"__tsan_atomic16_fetch_add",
	"__tsan_atomic32_fetch_add",
	"__tsan_atomic64_fetch_add",
	"__tsan_atomic8_fetch_sub",
	"__tsan_atomic16_fetch_sub",
	"__tsan_atomic32_fetch_sub",
	"__tsan_atomic64_fetch_sub",
	"__tsan_atomic8_fetch_and",
	"__tsan_atomic16_fetch_and",
	"__tsan_atomic32_fetch_and",
	"__tsan_atomic64_fetch_and",
	"__tsan_atomic8_fetch_or",
	"__tsan_atomic16_fetch_or",
	"__tsan_atomic32_fetch_or",
	"__tsan_atomic64_fetch_or",
	"__tsan_atomic8_fetch_xor",
	"__tsan_atomic16_fetch_xor",
	"__tsan_atomic32_fetch_xor",
	"__tsan_atomic64_fetch_xor",
	"__tsan_atomic8_fetch_nand",
	"__tsan_atomic16_fetch_nand",
	"__tsan_atomic32_fetch_nand",
	"__tsan_atomic64_fetch_nand",
	"__tsan_atomic8_compare_exchange_strong",
	"__tsan_atomic16_compare_exchange_strong",
	"__tsan_atomic32_compare_exchange_strong",
	"__tsan_atomic64_compare_exchange_strong",
	"__tsan_atomic8_compare_exchange_weak",
	"__tsan_atomic16_compare_exchange_weak",
	"__tsan_atomic32_compare_exchange_weak",
	"__tsan_atomic64_compare_exchange_weak",
	"__tsan_atomic8_compare_exchange_val",
	"__tsan_atomic16_compare_exchange_val",
	"__tsan_atomic32_compare_exchange_val",
	"__tsan_atomic64_compare_exchange_val",
	"__tsan_atomic_thread_fence",
	"__tsan_atomic_signal_fence",
	"__tsan_unaligned_read16",
	"__tsan_unaligned_write16",
	/* KCOV */
	"write_comp_data",
	"check_kcov_mode",
	"__sanitizer_cov_trace_pc",
	"__sanitizer_cov_trace_const_cmp1",
	"__sanitizer_cov_trace_const_cmp2",
	"__sanitizer_cov_trace_const_cmp4",
	"__sanitizer_cov_trace_const_cmp8",
	"__sanitizer_cov_trace_cmp1",
	"__sanitizer_cov_trace_cmp2",
	"__sanitizer_cov_trace_cmp4",
	"__sanitizer_cov_trace_cmp8",
	"__sanitizer_cov_trace_switch",
	/* KMSAN */
	"kmsan_copy_to_user",
	"kmsan_disable_current",
	"kmsan_enable_current",
	"kmsan_report",
	"kmsan_unpoison_entry_regs",
	"kmsan_unpoison_memory",
	"__msan_chain_origin",
	"__msan_get_context_state",
	"__msan_instrument_asm_store",
	"__msan_metadata_ptr_for_load_1",
	"__msan_metadata_ptr_for_load_2",
	"__msan_metadata_ptr_for_load_4",
	"__msan_metadata_ptr_for_load_8",
	"__msan_metadata_ptr_for_load_n",
	"__msan_metadata_ptr_for_store_1",
	"__msan_metadata_ptr_for_store_2",
	"__msan_metadata_ptr_for_store_4",
	"__msan_metadata_ptr_for_store_8",
	"__msan_metadata_ptr_for_store_n",
	"__msan_poison_alloca",
	"__msan_warning",
	/* UBSAN */
	"ubsan_type_mismatch_common",
	"__ubsan_handle_type_mismatch",
	"__ubsan_handle_type_mismatch_v1",
	"__ubsan_handle_shift_out_of_bounds",
	"__ubsan_handle_load_invalid_value",
	/* KSTACK_ERASE */
	"__sanitizer_cov_stack_depth",
	/* TRACE_BRANCH_PROFILING */
	"ftrace_likely_update",
	/* STACKPROTECTOR */
	"__stack_chk_fail",
	/* misc */
	"csum_partial_copy_generic",
	"copy_mc_fragile",
	"copy_mc_fragile_handle_tail",
	"copy_mc_enhanced_fast_string",
	"rep_stos_alternative",
	"rep_movs_alternative",
	"__copy_user_nocache",
	NULL
};

static void add_uaccess_safe(struct objtool_file *file)
{
	struct symbol *func;
	const char **name;

	if (!opts.uaccess)
		return;

	for (name = uaccess_safe_builtin; *name; name++) {
		func = find_symbol_by_name(file->elf, *name);
		if (!func)
			continue;

		func->uaccess_safe = true;
	}
}

/*
 * Symbols that replace INSN_CALL_DYNAMIC; every (tail) call to such a symbol
 * will be added to the .retpoline_sites section.
 */
__weak bool arch_is_retpoline(struct symbol *sym)
{
	return false;
}

/*
 * Symbols that replace INSN_RETURN; every (tail) call to such a symbol
 * will be added to the .return_sites section.
 */
__weak bool arch_is_rethunk(struct symbol *sym)
{
	return false;
}

/*
 * Symbols that are embedded inside other instructions, because sometimes crazy
 * code exists.  These are mostly ignored for validation purposes.
 */
__weak bool arch_is_embedded_insn(struct symbol *sym)
{
	return false;
}

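/*
 * Look up the reloc for an instruction, caching the absence of one:
 * insn->no_reloc records a failed lookup so repeat callers skip the
 * search.
 */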
static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
{
	struct reloc *reloc;

	if (insn->no_reloc)
		return NULL;

	if (!file)
		return NULL;

	reloc = find_reloc_by_dest_range(file->elf, insn->sec,
					 insn->offset, insn->len);
	if (!reloc) {
		insn->no_reloc = 1;
		return NULL;
	}

	return reloc;
}

static void remove_insn_ops(struct instruction *insn)
{
	struct stack_op *op, *next;

	for (op = insn->stack_ops; op; op = next) {
		next = op->next;
		free(op);
	}
	insn->stack_ops = NULL;
}

static int annotate_call_site(struct objtool_file *file,
			      struct instruction *insn, bool sibling)
{
	struct reloc *reloc = insn_reloc(file, insn);
	struct symbol *sym = insn_call_dest(insn);

	if (!sym)
		sym = reloc->sym;

	if (sym->static_call_tramp) {
		list_add_tail(&insn->call_node, &file->static_call_list);
		return 0;
	}

	if (sym->retpoline_thunk) {
		list_add_tail(&insn->call_node, &file->retpoline_call_list);
		return 0;
	}

	/*
	 * Many compilers cannot disable KCOV or sanitizer calls with a
	 * function attribute, so they need a little help: NOP out any such
	 * calls from noinstr text.
	 */
1312
if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) {
1313
if (reloc)
1314
set_reloc_type(file->elf, reloc, R_NONE);
1315
1316
if (elf_write_insn(file->elf, insn->sec,
1317
insn->offset, insn->len,
1318
sibling ? arch_ret_insn(insn->len)
1319
: arch_nop_insn(insn->len))) {
1320
return -1;
1321
}
1322
1323
insn->type = sibling ? INSN_RETURN : INSN_NOP;
1324
1325
if (sibling) {
1326
/*
1327
* We've replaced the tail-call JMP insn by two new
1328
* insn: RET; INT3, except we only have a single struct
1329
* insn here. Mark it retpoline_safe to avoid the SLS
1330
* warning, instead of adding another insn.
1331
*/
1332
insn->retpoline_safe = true;
1333
}
1334
1335
return 0;
1336
}
1337
1338
if (opts.mcount && sym->fentry) {
1339
if (sibling)
1340
WARN_INSN(insn, "tail call to __fentry__ !?!?");
1341
if (opts.mnop) {
1342
if (reloc)
1343
set_reloc_type(file->elf, reloc, R_NONE);
1344
1345
if (elf_write_insn(file->elf, insn->sec,
1346
insn->offset, insn->len,
1347
arch_nop_insn(insn->len))) {
1348
return -1;
1349
}
1350
1351
insn->type = INSN_NOP;
1352
}
1353
1354
list_add_tail(&insn->call_node, &file->mcount_loc_list);
1355
return 0;
1356
}
1357
1358
if (insn->type == INSN_CALL && !insn->sec->init &&
1359
!insn->_call_dest->embedded_insn)
1360
list_add_tail(&insn->call_node, &file->call_list);
1361
1362
if (!sibling && dead_end_function(file, sym))
1363
insn->dead_end = true;
1364
1365
return 0;
1366
}
1367
1368
static int add_call_dest(struct objtool_file *file, struct instruction *insn,
1369
struct symbol *dest, bool sibling)
1370
{
1371
insn->_call_dest = dest;
1372
if (!dest)
1373
return 0;
1374
1375
	/*
	 * Whatever stack impact regular CALLs have should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
1382
remove_insn_ops(insn);
1383
1384
return annotate_call_site(file, insn, sibling);
1385
}
1386
1387
static int add_retpoline_call(struct objtool_file *file, struct instruction *insn)
1388
{
1389
/*
1390
* Retpoline calls/jumps are really dynamic calls/jumps in disguise,
1391
* so convert them accordingly.
1392
*/
1393
switch (insn->type) {
1394
case INSN_CALL:
1395
insn->type = INSN_CALL_DYNAMIC;
1396
break;
1397
case INSN_JUMP_UNCONDITIONAL:
1398
insn->type = INSN_JUMP_DYNAMIC;
1399
break;
1400
case INSN_JUMP_CONDITIONAL:
1401
insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
1402
break;
1403
default:
1404
return 0;
1405
}
1406
1407
insn->retpoline_safe = true;
1408
1409
	/*
	 * Whatever stack impact regular CALLs have should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	return annotate_call_site(file, insn, false);
}

static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
{
	/*
	 * Return thunk tail calls are really just returns in disguise,
	 * so convert them accordingly.
	 */
	insn->type = INSN_RETURN;
	insn->retpoline_safe = true;

	if (add)
		list_add_tail(&insn->call_node, &file->return_thunk_list);
}

static bool is_first_func_insn(struct objtool_file *file,
			       struct instruction *insn, struct symbol *sym)
{
	if (insn->offset == sym->offset)
		return true;

	/* Allow direct CALL/JMP past ENDBR */
	if (opts.ibt) {
		struct instruction *prev = prev_insn_same_sym(file, insn);

		if (prev && prev->type == INSN_ENDBR &&
		    insn->offset == sym->offset + prev->len)
			return true;
	}

	return false;
}

/*
 * A sibling call is a tail-call to another symbol -- as opposed to a
 * recursive tail-call, which targets the same symbol.
 */
static bool jump_is_sibling_call(struct objtool_file *file,
				 struct instruction *from, struct instruction *to)
{
	struct symbol *fs = from->sym;
	struct symbol *ts = to->sym;

	/* Not a sibling call if from/to a symbol hole */
	if (!fs || !ts)
		return false;

	/* Not a sibling call if not targeting the start of a symbol. */
	if (!is_first_func_insn(file, to, ts))
		return false;

	/* Disallow sibling calls into STT_NOTYPE */
	if (ts->type == STT_NOTYPE)
		return false;

	/* Must not be self to be a sibling */
	return fs->pfunc != ts->pfunc;
}

/*
 * Find the destination instructions for all jumps.
 */
static int add_jump_destinations(struct objtool_file *file)
{
	struct instruction *insn, *jump_dest;
	struct reloc *reloc;
	struct section *dest_sec;
	unsigned long dest_off;
	int ret;

	for_each_insn(file, insn) {
		struct symbol *func = insn_func(insn);

		if (insn->jump_dest) {
			/*
			 * handle_group_alt() may have previously set
			 * 'jump_dest' for some alternatives.
			 */
			continue;
		}
		if (!is_static_jump(insn))
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			dest_sec = insn->sec;
			dest_off = arch_jump_destination(insn);
		} else if (reloc->sym->type == STT_SECTION) {
			dest_sec = reloc->sym->sec;
			dest_off = arch_dest_reloc_offset(reloc_addend(reloc));
		} else if (reloc->sym->retpoline_thunk) {
			ret = add_retpoline_call(file, insn);
			if (ret)
				return ret;
			continue;
		} else if (reloc->sym->return_thunk) {
			add_return_call(file, insn, true);
			continue;
		} else if (func) {
			/*
			 * External sibling call or internal sibling call with
			 * STT_FUNC reloc.
			 */
			ret = add_call_dest(file, insn, reloc->sym, true);
			if (ret)
				return ret;
			continue;
		} else if (reloc->sym->sec->idx) {
			dest_sec = reloc->sym->sec;
			dest_off = reloc->sym->sym.st_value +
				   arch_dest_reloc_offset(reloc_addend(reloc));
		} else {
			/* non-func asm code jumping to another file */
			continue;
		}

		jump_dest = find_insn(file, dest_sec, dest_off);
		if (!jump_dest) {
			struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);

			/*
			 * This is a special case for retbleed_untrain_ret().
			 * It jumps to __x86_return_thunk(), but objtool
			 * can't find the thunk's starting RET
			 * instruction, because the RET is also in the
			 * middle of another instruction.  Objtool only
			 * knows about the outer instruction.
			 */
			if (sym && sym->embedded_insn) {
				add_return_call(file, insn, false);
				continue;
			}

			/*
			 * GCOV/KCOV dead code can jump to the end of the
			 * function/section.
			 */
			if (file->ignore_unreachables && func &&
			    dest_sec == insn->sec &&
			    dest_off == func->offset + func->len)
				continue;

			ERROR_INSN(insn, "can't find jump dest instruction at %s+0x%lx",
				   dest_sec->name, dest_off);
			return -1;
		}

		/*
		 * An intra-TU jump in retpoline.o might not have a relocation
		 * for its jump dest, in which case the above
		 * add_{retpoline,return}_call() didn't happen.
		 */
		if (jump_dest->sym && jump_dest->offset == jump_dest->sym->offset) {
			if (jump_dest->sym->retpoline_thunk) {
				ret = add_retpoline_call(file, insn);
				if (ret)
					return ret;
				continue;
			}
			if (jump_dest->sym->return_thunk) {
				add_return_call(file, insn, true);
				continue;
			}
		}

		/*
		 * Cross-function jump.
		 */
		if (func && insn_func(jump_dest) && func != insn_func(jump_dest)) {

			/*
			 * For GCC 8+, create parent/child links for any cold
			 * subfunctions.  This is _mostly_ redundant with a
			 * similar initialization in read_symbols().
			 *
			 * If a function has aliases, we want the *first* such
			 * function in the symbol table to be the subfunction's
			 * parent.  In that case we overwrite the
			 * initialization done in read_symbols().
			 *
			 * However this code can't completely replace the
			 * read_symbols() code because this doesn't detect the
			 * case where the parent function's only reference to a
			 * subfunction is through a jump table.
			 */
			if (!strstr(func->name, ".cold") &&
			    strstr(insn_func(jump_dest)->name, ".cold")) {
				func->cfunc = insn_func(jump_dest);
				insn_func(jump_dest)->pfunc = func;
			}
		}

		if (jump_is_sibling_call(file, insn, jump_dest)) {
			/*
			 * Internal sibling call without reloc or with
			 * STT_SECTION reloc.
			 */
			ret = add_call_dest(file, insn, insn_func(jump_dest), true);
			if (ret)
				return ret;
			continue;
		}

		insn->jump_dest = jump_dest;
	}

	return 0;
}

static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
{
	struct symbol *call_dest;

	call_dest = find_func_by_offset(sec, offset);
	if (!call_dest)
		call_dest = find_symbol_by_offset(sec, offset);

	return call_dest;
}

/*
 * Find the destination instructions for all calls.
 */
static int add_call_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	unsigned long dest_off;
	struct symbol *dest;
	struct reloc *reloc;
	int ret;

	for_each_insn(file, insn) {
		struct symbol *func = insn_func(insn);
		if (insn->type != INSN_CALL)
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			dest_off = arch_jump_destination(insn);
			dest = find_call_destination(insn->sec, dest_off);

			ret = add_call_dest(file, insn, dest, false);
			if (ret)
				return ret;

			if (func && func->ignore)
				continue;

			if (!insn_call_dest(insn)) {
				ERROR_INSN(insn, "unannotated intra-function call");
				return -1;
			}

			if (func && insn_call_dest(insn)->type != STT_FUNC) {
				ERROR_INSN(insn, "unsupported call to non-function");
				return -1;
			}

		} else if (reloc->sym->type == STT_SECTION) {
			dest_off = arch_dest_reloc_offset(reloc_addend(reloc));
			dest = find_call_destination(reloc->sym->sec, dest_off);
			if (!dest) {
				ERROR_INSN(insn, "can't find call dest symbol at %s+0x%lx",
					   reloc->sym->sec->name, dest_off);
				return -1;
			}

			ret = add_call_dest(file, insn, dest, false);
			if (ret)
				return ret;

		} else if (reloc->sym->retpoline_thunk) {
			ret = add_retpoline_call(file, insn);
			if (ret)
				return ret;

		} else {
			ret = add_call_dest(file, insn, reloc->sym, false);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/*
 * The .alternatives section requires some extra special care over and above
 * other special sections because alternatives are patched in place.
 */
static int handle_group_alt(struct objtool_file *file,
			    struct special_alt *special_alt,
			    struct instruction *orig_insn,
			    struct instruction **new_insn)
{
	struct instruction *last_new_insn = NULL, *insn, *nop = NULL;
	struct alt_group *orig_alt_group, *new_alt_group;
	unsigned long dest_off;

	orig_alt_group = orig_insn->alt_group;
	if (!orig_alt_group) {
		struct instruction *last_orig_insn = NULL;

		orig_alt_group = calloc(1, sizeof(*orig_alt_group));
		if (!orig_alt_group) {
			ERROR_GLIBC("calloc");
			return -1;
		}
		orig_alt_group->cfi = calloc(special_alt->orig_len,
					     sizeof(struct cfi_state *));
		if (!orig_alt_group->cfi) {
			ERROR_GLIBC("calloc");
			return -1;
		}

		insn = orig_insn;
		sec_for_each_insn_from(file, insn) {
			if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
				break;

			insn->alt_group = orig_alt_group;
			last_orig_insn = insn;
		}
		orig_alt_group->orig_group = NULL;
		orig_alt_group->first_insn = orig_insn;
		orig_alt_group->last_insn = last_orig_insn;
		orig_alt_group->nop = NULL;
		orig_alt_group->ignore = orig_insn->ignore_alts;
	} else {
		if (orig_alt_group->last_insn->offset + orig_alt_group->last_insn->len -
		    orig_alt_group->first_insn->offset != special_alt->orig_len) {
			ERROR_INSN(orig_insn, "weirdly overlapping alternative! %ld != %d",
				   orig_alt_group->last_insn->offset +
				   orig_alt_group->last_insn->len -
				   orig_alt_group->first_insn->offset,
				   special_alt->orig_len);
			return -1;
		}
	}

	new_alt_group = calloc(1, sizeof(*new_alt_group));
	if (!new_alt_group) {
		ERROR_GLIBC("calloc");
		return -1;
	}

	if (special_alt->new_len < special_alt->orig_len) {
		/*
		 * Insert a fake nop at the end to make the replacement
		 * alt_group the same size as the original.  This is needed to
		 * allow propagate_alt_cfi() to do its magic.  When the last
		 * instruction affects the stack, the instruction after it (the
		 * nop) will propagate the new state to the shared CFI array.
		 */
		nop = calloc(1, sizeof(*nop));
		if (!nop) {
			ERROR_GLIBC("calloc");
			return -1;
		}

		nop->sec = special_alt->new_sec;
		nop->offset = special_alt->new_off + special_alt->new_len;
		nop->len = special_alt->orig_len - special_alt->new_len;
		nop->type = INSN_NOP;
		nop->sym = orig_insn->sym;
		nop->alt_group = new_alt_group;
	}

	if (!special_alt->new_len) {
		*new_insn = nop;
		goto end;
	}

	insn = *new_insn;
	sec_for_each_insn_from(file, insn) {
		struct reloc *alt_reloc;

		if (insn->offset >= special_alt->new_off + special_alt->new_len)
			break;

		last_new_insn = insn;

		insn->sym = orig_insn->sym;
		insn->alt_group = new_alt_group;

		/*
		 * Since alternative replacement code is copy/pasted by the
		 * kernel after applying relocations, generally such code can't
		 * have relative-address relocation references to outside the
		 * .altinstr_replacement section, unless the arch's
		 * alternatives code can adjust the relative offsets
		 * accordingly.
		 */
		alt_reloc = insn_reloc(file, insn);
		if (alt_reloc && arch_pc_relative_reloc(alt_reloc) &&
		    !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {

			ERROR_INSN(insn, "unsupported relocation in alternatives section");
			return -1;
		}

		if (!is_static_jump(insn))
			continue;

		if (!insn->immediate)
			continue;

		dest_off = arch_jump_destination(insn);
		if (dest_off == special_alt->new_off + special_alt->new_len) {
			insn->jump_dest = next_insn_same_sec(file, orig_alt_group->last_insn);
			if (!insn->jump_dest) {
				ERROR_INSN(insn, "can't find alternative jump destination");
				return -1;
			}
		}
	}

	if (!last_new_insn) {
		ERROR_FUNC(special_alt->new_sec, special_alt->new_off,
			   "can't find last new alternative instruction");
		return -1;
	}

end:
	new_alt_group->orig_group = orig_alt_group;
	new_alt_group->first_insn = *new_insn;
	new_alt_group->last_insn = last_new_insn;
	new_alt_group->nop = nop;
	new_alt_group->ignore = (*new_insn)->ignore_alts;
	new_alt_group->cfi = orig_alt_group->cfi;
	return 0;
}

/*
 * A jump table entry can either convert a nop to a jump or a jump to a nop.
 * If the original instruction is a jump, make the alt entry an effective nop
 * by just skipping the original instruction.
 */
static int handle_jump_alt(struct objtool_file *file,
			   struct special_alt *special_alt,
			   struct instruction *orig_insn,
			   struct instruction **new_insn)
{
	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
	    orig_insn->type != INSN_NOP) {

		ERROR_INSN(orig_insn, "unsupported instruction at jump label");
		return -1;
	}

	if (opts.hack_jump_label && special_alt->key_addend & 2) {
		struct reloc *reloc = insn_reloc(file, orig_insn);

		if (reloc)
			set_reloc_type(file->elf, reloc, R_NONE);

		if (elf_write_insn(file->elf, orig_insn->sec,
				   orig_insn->offset, orig_insn->len,
				   arch_nop_insn(orig_insn->len))) {
			return -1;
		}

		orig_insn->type = INSN_NOP;
	}

	if (orig_insn->type == INSN_NOP) {
		if (orig_insn->len == 2)
			file->jl_nop_short++;
		else
			file->jl_nop_long++;

		return 0;
	}

	if (orig_insn->len == 2)
		file->jl_short++;
	else
		file->jl_long++;

	*new_insn = next_insn_same_sec(file, orig_insn);
	return 0;
}

/*
 * Read all the special sections that have alternate instructions which can be
 * patched in or redirected to at runtime.  Each instruction with alternates
 * has them added to its insn->alts list, which is traversed in
 * validate_branch().
 */
static int add_special_section_alts(struct objtool_file *file)
{
	struct list_head special_alts;
	struct instruction *orig_insn, *new_insn;
	struct special_alt *special_alt, *tmp;
	struct alternative *alt;
	int ret;

	if (special_get_alts(file->elf, &special_alts))
		return -1;

	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {

		orig_insn = find_insn(file, special_alt->orig_sec,
				      special_alt->orig_off);
		if (!orig_insn) {
			ERROR_FUNC(special_alt->orig_sec, special_alt->orig_off,
				   "special: can't find orig instruction");
			return -1;
		}

		new_insn = NULL;
		if (!special_alt->group || special_alt->new_len) {
			new_insn = find_insn(file, special_alt->new_sec,
					     special_alt->new_off);
			if (!new_insn) {
				ERROR_FUNC(special_alt->new_sec, special_alt->new_off,
					   "special: can't find new instruction");
				return -1;
			}
		}

		if (special_alt->group) {
			if (!special_alt->orig_len) {
				ERROR_INSN(orig_insn, "empty alternative entry");
				continue;
			}

			ret = handle_group_alt(file, special_alt, orig_insn,
					       &new_insn);
			if (ret)
				return ret;

		} else if (special_alt->jump_or_nop) {
			ret = handle_jump_alt(file, special_alt, orig_insn,
					      &new_insn);
			if (ret)
				return ret;
		}

		alt = calloc(1, sizeof(*alt));
		if (!alt) {
			ERROR_GLIBC("calloc");
			return -1;
		}

		alt->insn = new_insn;
		alt->next = orig_insn->alts;
		orig_insn->alts = alt;

		list_del(&special_alt->list);
		free(special_alt);
	}

	if (opts.stats) {
		printf("jl\\\tNOP\tJMP\n");
		printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
		printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
	}

	return 0;
}

__weak unsigned long arch_jump_table_sym_offset(struct reloc *reloc, struct reloc *table)
{
	return reloc->sym->offset + reloc_addend(reloc);
}

static int add_jump_table(struct objtool_file *file, struct instruction *insn)
{
	unsigned long table_size = insn_jump_table_size(insn);
	struct symbol *pfunc = insn_func(insn)->pfunc;
	struct reloc *table = insn_jump_table(insn);
	struct instruction *dest_insn;
	unsigned int prev_offset = 0;
	struct reloc *reloc = table;
	struct alternative *alt;
	unsigned long sym_offset;

	/*
	 * Each @reloc is a switch table relocation which points to the target
	 * instruction.
	 */
	for_each_reloc_from(table->sec, reloc) {

		/* Check for the end of the table: */
		if (table_size && reloc_offset(reloc) - reloc_offset(table) >= table_size)
			break;
		if (reloc != table && is_jump_table(reloc))
			break;

		/* Make sure the table entries are consecutive: */
		if (prev_offset && reloc_offset(reloc) != prev_offset + arch_reloc_size(reloc))
			break;

		sym_offset = arch_jump_table_sym_offset(reloc, table);

		/* Detect function pointers from contiguous objects: */
		if (reloc->sym->sec == pfunc->sec && sym_offset == pfunc->offset)
			break;

		/*
		 * Clang sometimes leaves dangling unused jump table entries
		 * which point to the end of the function.  Ignore them.
		 */
		if (reloc->sym->sec == pfunc->sec &&
		    sym_offset == pfunc->offset + pfunc->len)
			goto next;

		dest_insn = find_insn(file, reloc->sym->sec, sym_offset);
		if (!dest_insn)
			break;

		/* Make sure the destination is in the same function: */
		if (!insn_func(dest_insn) || insn_func(dest_insn)->pfunc != pfunc)
			break;

		alt = calloc(1, sizeof(*alt));
		if (!alt) {
			ERROR_GLIBC("calloc");
			return -1;
		}

		alt->insn = dest_insn;
		alt->next = insn->alts;
		insn->alts = alt;
next:
		prev_offset = reloc_offset(reloc);
	}

	if (!prev_offset) {
		ERROR_INSN(insn, "can't find switch jump table");
		return -1;
	}

	return 0;
}

/*
 * find_jump_table() - Given a dynamic jump, find the switch jump table
 * associated with it.
 */
static void find_jump_table(struct objtool_file *file, struct symbol *func,
			    struct instruction *insn)
{
	struct reloc *table_reloc;
	struct instruction *dest_insn, *orig_insn = insn;
	unsigned long table_size;
	unsigned long sym_offset;

	/*
	 * Backward search using the @first_jump_src links; these help avoid
	 * much of the 'in between' code, which could otherwise confuse us.
	 */
2074
for (;
2075
insn && insn_func(insn) && insn_func(insn)->pfunc == func;
2076
insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {
2077
2078
if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
2079
break;
2080
2081
/* allow small jumps within the range */
2082
if (insn->type == INSN_JUMP_UNCONDITIONAL &&
2083
insn->jump_dest &&
2084
(insn->jump_dest->offset <= insn->offset ||
2085
insn->jump_dest->offset > orig_insn->offset))
2086
break;
2087
2088
table_reloc = arch_find_switch_table(file, insn, &table_size);
2089
if (!table_reloc)
2090
continue;
2091
2092
sym_offset = table_reloc->sym->offset + reloc_addend(table_reloc);
2093
2094
dest_insn = find_insn(file, table_reloc->sym->sec, sym_offset);
2095
if (!dest_insn || !insn_func(dest_insn) || insn_func(dest_insn)->pfunc != func)
2096
continue;
2097
2098
set_jump_table(table_reloc);
2099
orig_insn->_jump_table = table_reloc;
2100
orig_insn->_jump_table_size = table_size;
2101
2102
break;
2103
}
2104
}
2105
2106
/*
2107
* First pass: Mark the head of each jump table so that in the next pass,
2108
* we know when a given jump table ends and the next one starts.
2109
*/
2110
static void mark_func_jump_tables(struct objtool_file *file,
2111
struct symbol *func)
2112
{
2113
struct instruction *insn, *last = NULL;
2114
2115
func_for_each_insn(file, func, insn) {
2116
if (!last)
2117
last = insn;
2118
2119
/*
2120
* Store back-pointers for unconditional forward jumps such
2121
* that find_jump_table() can back-track using those and
2122
* avoid some potentially confusing code.
2123
*/
2124
if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
2125
insn->offset > last->offset &&
2126
insn->jump_dest->offset > insn->offset &&
2127
!insn->jump_dest->first_jump_src) {
2128
2129
insn->jump_dest->first_jump_src = insn;
2130
last = insn->jump_dest;
2131
}
2132
2133
if (insn->type != INSN_JUMP_DYNAMIC)
2134
continue;
2135
2136
find_jump_table(file, func, insn);
2137
}
2138
}
2139
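/*
* A sketch (hypothetical layout) of why the back-pointers help:
*
* jmp .L2 <- stored as .L2's first_jump_src
* .L1: ... <- 'in between' code, e.g. a cold path
* .L2: mov table(,%rax,8), %rax
* jmp *%rax <- dynamic jump
*
* When find_jump_table() walks backwards from the dynamic jump and
* reaches .L2, it hops straight to the forward jump via first_jump_src,
* skipping the potentially confusing .L1 block.
*/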
2140
static int add_func_jump_tables(struct objtool_file *file,
2141
struct symbol *func)
2142
{
2143
struct instruction *insn;
2144
int ret;
2145
2146
func_for_each_insn(file, func, insn) {
2147
if (!insn_jump_table(insn))
2148
continue;
2149
2150
ret = add_jump_table(file, insn);
2151
if (ret)
2152
return ret;
2153
}
2154
2155
return 0;
2156
}
2157
2158
/*
2159
* For some switch statements, gcc generates a jump table in the .rodata
2160
* section which contains a list of addresses within the function to jump to.
2161
* This finds these jump tables and adds them to the insn->alts lists.
2162
*/
2163
static int add_jump_table_alts(struct objtool_file *file)
2164
{
2165
struct symbol *func;
2166
int ret;
2167
2168
if (!file->rodata)
2169
return 0;
2170
2171
for_each_sym(file, func) {
2172
if (func->type != STT_FUNC)
2173
continue;
2174
2175
mark_func_jump_tables(file, func);
2176
ret = add_func_jump_tables(file, func);
2177
if (ret)
2178
return ret;
2179
}
2180
2181
return 0;
2182
}
2183
2184
static void set_func_state(struct cfi_state *state)
2185
{
2186
state->cfa = initial_func_cfi.cfa;
2187
memcpy(&state->regs, &initial_func_cfi.regs,
2188
CFI_NUM_REGS * sizeof(struct cfi_reg));
2189
state->stack_size = initial_func_cfi.cfa.offset;
2190
state->type = UNWIND_HINT_TYPE_CALL;
2191
}
2192
2193
static int read_unwind_hints(struct objtool_file *file)
2194
{
2195
struct cfi_state cfi = init_cfi;
2196
struct section *sec;
2197
struct unwind_hint *hint;
2198
struct instruction *insn;
2199
struct reloc *reloc;
2200
unsigned long offset;
2201
int i;
2202
2203
sec = find_section_by_name(file->elf, ".discard.unwind_hints");
2204
if (!sec)
2205
return 0;
2206
2207
if (!sec->rsec) {
2208
ERROR("missing .rela.discard.unwind_hints section");
2209
return -1;
2210
}
2211
2212
if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
2213
ERROR("struct unwind_hint size mismatch");
2214
return -1;
2215
}
2216
2217
file->hints = true;
2218
2219
for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) {
2220
hint = (struct unwind_hint *)sec->data->d_buf + i;
2221
2222
reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
2223
if (!reloc) {
2224
ERROR("can't find reloc for unwind_hints[%d]", i);
2225
return -1;
2226
}
2227
2228
if (reloc->sym->type == STT_SECTION) {
2229
offset = reloc_addend(reloc);
2230
} else if (reloc->sym->local_label) {
2231
offset = reloc->sym->offset;
2232
} else {
2233
ERROR("unexpected relocation symbol type in %s", sec->rsec->name);
2234
return -1;
2235
}
2236
2237
insn = find_insn(file, reloc->sym->sec, offset);
2238
if (!insn) {
2239
ERROR("can't find insn for unwind_hints[%d]", i);
2240
return -1;
2241
}
2242
2243
insn->hint = true;
2244
2245
if (hint->type == UNWIND_HINT_TYPE_UNDEFINED) {
2246
insn->cfi = &force_undefined_cfi;
2247
continue;
2248
}
2249
2250
if (hint->type == UNWIND_HINT_TYPE_SAVE) {
2251
insn->hint = false;
2252
insn->save = true;
2253
continue;
2254
}
2255
2256
if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
2257
insn->restore = true;
2258
continue;
2259
}
2260
2261
if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
2262
struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);
2263
2264
if (sym && sym->bind == STB_GLOBAL) {
2265
if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) {
2266
ERROR_INSN(insn, "UNWIND_HINT_IRET_REGS without ENDBR");
2267
return -1;
2268
}
2269
}
2270
}
2271
2272
if (hint->type == UNWIND_HINT_TYPE_FUNC) {
2273
insn->cfi = &func_cfi;
2274
continue;
2275
}
2276
2277
if (insn->cfi)
2278
cfi = *(insn->cfi);
2279
2280
if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
2281
ERROR_INSN(insn, "unsupported unwind_hint sp base reg %d", hint->sp_reg);
2282
return -1;
2283
}
2284
2285
cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset);
2286
cfi.type = hint->type;
2287
cfi.signal = hint->signal;
2288
2289
insn->cfi = cfi_hash_find_or_add(&cfi);
2290
}
2291
2292
return 0;
2293
}
2294
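/*
* For reference, each entry consumed above is a struct unwind_hint,
* roughly (see include/linux/objtool_types.h for the authoritative
* definition):
*
* struct unwind_hint {
* u32 ip; <- reloc'd to the hinted instruction
* s16 sp_offset;
* u8 sp_reg;
* u8 type;
* u8 signal;
* };
*
* Asm code emits these records via the UNWIND_HINT macro family; the
* relocation on the ip field is what find_reloc_by_dest() resolves.
*/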
2295
static int read_annotate(struct objtool_file *file,
2296
int (*func)(struct objtool_file *file, int type, struct instruction *insn))
2297
{
2298
struct section *sec;
2299
struct instruction *insn;
2300
struct reloc *reloc;
2301
uint64_t offset;
2302
int type, ret;
2303
2304
sec = find_section_by_name(file->elf, ".discard.annotate_insn");
2305
if (!sec)
2306
return 0;
2307
2308
if (!sec->rsec)
2309
return 0;
2310
2311
if (sec->sh.sh_entsize != 8) {
2312
static bool warned = false;
2313
if (!warned && opts.verbose) {
2314
WARN("%s: dodgy linker, sh_entsize != 8", sec->name);
2315
warned = true;
2316
}
2317
sec->sh.sh_entsize = 8;
2318
}
2319
2320
for_each_reloc(sec->rsec, reloc) {
2321
type = *(u32 *)(sec->data->d_buf + (reloc_idx(reloc) * sec->sh.sh_entsize) + 4);
2322
type = bswap_if_needed(file->elf, type);
2323
2324
offset = reloc->sym->offset + reloc_addend(reloc);
2325
insn = find_insn(file, reloc->sym->sec, offset);
2326
2327
if (!insn) {
2328
ERROR("bad .discard.annotate_insn entry: %d of type %d", reloc_idx(reloc), type);
2329
return -1;
2330
}
2331
2332
ret = func(file, type, insn);
2333
if (ret < 0)
2334
return ret;
2335
}
2336
2337
return 0;
2338
}
2339
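/*
* Each .discard.annotate_insn entry is 8 bytes: a 4-byte, reloc'd
* reference to the annotated instruction followed by the 4-byte type
* read above. The emitting asm looks along these lines (sketch):
*
* .pushsection .discard.annotate_insn, "M", @progbits, 8
* .long annotated_insn - .
* .long type
* .popsection
*
* which is also why sh_entsize is expected to be 8.
*/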
2340
static int __annotate_early(struct objtool_file *file, int type, struct instruction *insn)
2341
{
2342
switch (type) {
2343
2344
/* Must be before add_special_section_alts() */
2345
case ANNOTYPE_IGNORE_ALTS:
2346
insn->ignore_alts = true;
2347
break;
2348
2349
/*
2350
* Must be before read_unwind_hints() since that needs insn->noendbr.
2351
*/
2352
case ANNOTYPE_NOENDBR:
2353
insn->noendbr = 1;
2354
break;
2355
2356
default:
2357
break;
2358
}
2359
2360
return 0;
2361
}
2362
2363
static int __annotate_ifc(struct objtool_file *file, int type, struct instruction *insn)
2364
{
2365
unsigned long dest_off;
2366
2367
if (type != ANNOTYPE_INTRA_FUNCTION_CALL)
2368
return 0;
2369
2370
if (insn->type != INSN_CALL) {
2371
ERROR_INSN(insn, "intra_function_call not a direct call");
2372
return -1;
2373
}
2374
2375
/*
2376
* Treat intra-function CALLs as JMPs, but with a stack_op.
2377
* See add_call_destinations(), which strips stack_ops from
2378
* normal CALLs.
2379
*/
2380
insn->type = INSN_JUMP_UNCONDITIONAL;
2381
2382
dest_off = arch_jump_destination(insn);
2383
insn->jump_dest = find_insn(file, insn->sec, dest_off);
2384
if (!insn->jump_dest) {
2385
ERROR_INSN(insn, "can't find call dest at %s+0x%lx",
2386
insn->sec->name, dest_off);
2387
return -1;
2388
}
2389
2390
return 0;
2391
}
2392
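/*
* A sketch of the pattern this supports, cf. the x86 retpoline thunks:
*
* ANNOTATE_INTRA_FUNCTION_CALL
* call .Ldo_rop <- CALL within the same function
* .Lspec_trap:
* pause
* lfence
* jmp .Lspec_trap
* .Ldo_rop:
* mov %rax, (%rsp) <- overwrite the pushed return address
* ret
*
* Objtool follows the CALL as a jump while still accounting for the
* return address pushed by its stack_op.
*/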
2393
static int __annotate_late(struct objtool_file *file, int type, struct instruction *insn)
2394
{
2395
switch (type) {
2396
case ANNOTYPE_NOENDBR:
2397
/* early */
2398
break;
2399
2400
case ANNOTYPE_RETPOLINE_SAFE:
2401
if (insn->type != INSN_JUMP_DYNAMIC &&
2402
insn->type != INSN_CALL_DYNAMIC &&
2403
insn->type != INSN_RETURN &&
2404
insn->type != INSN_NOP) {
2405
ERROR_INSN(insn, "retpoline_safe hint not an indirect jump/call/ret/nop");
2406
return -1;
2407
}
2408
2409
insn->retpoline_safe = true;
2410
break;
2411
2412
case ANNOTYPE_INSTR_BEGIN:
2413
insn->instr++;
2414
break;
2415
2416
case ANNOTYPE_INSTR_END:
2417
insn->instr--;
2418
break;
2419
2420
case ANNOTYPE_UNRET_BEGIN:
2421
insn->unret = 1;
2422
break;
2423
2424
case ANNOTYPE_IGNORE_ALTS:
2425
/* early */
2426
break;
2427
2428
case ANNOTYPE_INTRA_FUNCTION_CALL:
2429
/* ifc */
2430
break;
2431
2432
case ANNOTYPE_REACHABLE:
2433
insn->dead_end = false;
2434
break;
2435
2436
default:
2437
ERROR_INSN(insn, "Unknown annotation type: %d", type);
2438
return -1;
2439
}
2440
2441
return 0;
2442
}
2443
2444
/*
2445
* Return true if name matches an instrumentation function, where calls to that
2446
* function from noinstr code can safely be removed, but compilers won't do so.
2447
*/
2448
static bool is_profiling_func(const char *name)
2449
{
2450
/*
2451
* Many compilers cannot disable KCOV with a function attribute.
2452
*/
2453
if (!strncmp(name, "__sanitizer_cov_", 16))
2454
return true;
2455
2456
/*
2457
* Some compilers currently do not remove __tsan_func_entry/exit nor
2458
* __tsan_atomic_signal_fence (used for barrier instrumentation) with
2459
* the __no_sanitize_thread attribute, so remove them here. Once the
2460
* kernel's minimum Clang version is 14.0, this workaround can go away.
2461
*/
2462
if (!strncmp(name, "__tsan_func_", 12) ||
2463
!strcmp(name, "__tsan_atomic_signal_fence"))
2464
return true;
2465
2466
return false;
2467
}
2468
2469
static int classify_symbols(struct objtool_file *file)
2470
{
2471
struct symbol *func;
2472
2473
for_each_sym(file, func) {
2474
if (func->type == STT_NOTYPE && strstarts(func->name, ".L"))
2475
func->local_label = true;
2476
2477
if (func->bind != STB_GLOBAL)
2478
continue;
2479
2480
if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
2481
strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
2482
func->static_call_tramp = true;
2483
2484
if (arch_is_retpoline(func))
2485
func->retpoline_thunk = true;
2486
2487
if (arch_is_rethunk(func))
2488
func->return_thunk = true;
2489
2490
if (arch_is_embedded_insn(func))
2491
func->embedded_insn = true;
2492
2493
if (arch_ftrace_match(func->name))
2494
func->fentry = true;
2495
2496
if (is_profiling_func(func->name))
2497
func->profiling_func = true;
2498
}
2499
2500
return 0;
2501
}
2502
2503
static void mark_rodata(struct objtool_file *file)
2504
{
2505
struct section *sec;
2506
bool found = false;
2507
2508
/*
2509
* Search for the following rodata sections, each of which can
2510
* potentially contain jump tables:
2511
*
2512
* - .rodata: can contain GCC switch tables
2513
* - .rodata.<func>: same, if -fdata-sections is being used
2514
* - .data.rel.ro.c_jump_table: contains C annotated jump tables
2515
*
2516
* .rodata.str1.* sections are ignored; they don't contain jump tables.
2517
*/
2518
for_each_sec(file, sec) {
2519
if ((!strncmp(sec->name, ".rodata", 7) &&
2520
!strstr(sec->name, ".str1.")) ||
2521
!strncmp(sec->name, ".data.rel.ro", 12)) {
2522
sec->rodata = true;
2523
found = true;
2524
}
2525
}
2526
2527
file->rodata = found;
2528
}
2529
2530
static int decode_sections(struct objtool_file *file)
2531
{
2532
int ret;
2533
2534
mark_rodata(file);
2535
2536
ret = init_pv_ops(file);
2537
if (ret)
2538
return ret;
2539
2540
/*
2541
* Must be before add_jump_destinations() and add_call_destinations().
2542
*/
2543
ret = classify_symbols(file);
2544
if (ret)
2545
return ret;
2546
2547
ret = decode_instructions(file);
2548
if (ret)
2549
return ret;
2550
2551
ret = add_ignores(file);
2552
if (ret)
2553
return ret;
2554
2555
add_uaccess_safe(file);
2556
2557
ret = read_annotate(file, __annotate_early);
2558
if (ret)
2559
return ret;
2560
2561
/*
2562
* Must be before add_jump_destinations(), which depends on 'func'
2563
* being set for alternatives, to enable proper sibling call detection.
2564
*/
2565
if (opts.stackval || opts.orc || opts.uaccess || opts.noinstr) {
2566
ret = add_special_section_alts(file);
2567
if (ret)
2568
return ret;
2569
}
2570
2571
ret = add_jump_destinations(file);
2572
if (ret)
2573
return ret;
2574
2575
/*
2576
* Must be before add_call_destinations(); it changes INSN_CALL to
2577
* INSN_JUMP_UNCONDITIONAL.
2578
*/
2579
ret = read_annotate(file, __annotate_ifc);
2580
if (ret)
2581
return ret;
2582
2583
ret = add_call_destinations(file);
2584
if (ret)
2585
return ret;
2586
2587
ret = add_jump_table_alts(file);
2588
if (ret)
2589
return ret;
2590
2591
ret = read_unwind_hints(file);
2592
if (ret)
2593
return ret;
2594
2595
/*
2596
* Must be after add_call_destinations() such that it can override
2597
* dead_end_function() marks.
2598
*/
2599
ret = read_annotate(file, __annotate_late);
2600
if (ret)
2601
return ret;
2602
2603
return 0;
2604
}
2605
2606
static bool is_special_call(struct instruction *insn)
2607
{
2608
if (insn->type == INSN_CALL) {
2609
struct symbol *dest = insn_call_dest(insn);
2610
2611
if (!dest)
2612
return false;
2613
2614
if (dest->fentry || dest->embedded_insn)
2615
return true;
2616
}
2617
2618
return false;
2619
}
2620
2621
static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
2622
{
2623
struct cfi_state *cfi = &state->cfi;
2624
int i;
2625
2626
if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
2627
return true;
2628
2629
if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
2630
return true;
2631
2632
if (cfi->stack_size != initial_func_cfi.cfa.offset)
2633
return true;
2634
2635
for (i = 0; i < CFI_NUM_REGS; i++) {
2636
if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
2637
cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
2638
return true;
2639
}
2640
2641
return false;
2642
}
2643
2644
static bool check_reg_frame_pos(const struct cfi_reg *reg,
2645
int expected_offset)
2646
{
2647
return reg->base == CFI_CFA &&
2648
reg->offset == expected_offset;
2649
}
2650
2651
static bool has_valid_stack_frame(struct insn_state *state)
2652
{
2653
struct cfi_state *cfi = &state->cfi;
2654
2655
if (cfi->cfa.base == CFI_BP &&
2656
check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
2657
check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
2658
return true;
2659
2660
if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
2661
return true;
2662
2663
return false;
2664
}
2665
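/*
* For a standard frame-pointer prologue (sketch):
*
* push %rbp <- saved RBP at CFA-16, return address at CFA-8
* mov %rsp, %rbp
*
* the CFA is %rbp + 16, matching the first test above: cfa.base is
* CFI_BP, the saved BP sits at -cfa.offset and the return address
* 8 bytes above it.
*/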
2666
static int update_cfi_state_regs(struct instruction *insn,
2667
struct cfi_state *cfi,
2668
struct stack_op *op)
2669
{
2670
struct cfi_reg *cfa = &cfi->cfa;
2671
2672
if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
2673
return 0;
2674
2675
/* push */
2676
if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
2677
cfa->offset += 8;
2678
2679
/* pop */
2680
if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
2681
cfa->offset -= 8;
2682
2683
/* add immediate to sp */
2684
if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
2685
op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
2686
cfa->offset -= op->src.offset;
2687
2688
return 0;
2689
}
2690
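/*
* Example (sketch) of the bookkeeping above, with cfa.base == CFI_SP
* and SUB decoded as an ADD with a negative offset:
*
* push %rbx <- cfa.offset += 8
* sub $0x10, %rsp <- src.offset == -0x10, cfa.offset += 0x10
* ...
* add $0x10, %rsp <- cfa.offset -= 0x10
* pop %rbx <- cfa.offset -= 8
*/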
2691
static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
2692
{
2693
if (arch_callee_saved_reg(reg) &&
2694
cfi->regs[reg].base == CFI_UNDEFINED) {
2695
cfi->regs[reg].base = base;
2696
cfi->regs[reg].offset = offset;
2697
}
2698
}
2699
2700
static void restore_reg(struct cfi_state *cfi, unsigned char reg)
2701
{
2702
cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
2703
cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
2704
}
2705
2706
/*
2707
* A note about DRAP stack alignment:
2708
*
2709
* GCC has the concept of a DRAP register, which is used to help keep track of
2710
* the stack pointer when aligning the stack. r10 or r13 is used as the DRAP
2711
* register. The typical DRAP pattern is:
2712
*
2713
* 4c 8d 54 24 08 lea 0x8(%rsp),%r10
2714
* 48 83 e4 c0 and $0xffffffffffffffc0,%rsp
2715
* 41 ff 72 f8 pushq -0x8(%r10)
2716
* 55 push %rbp
2717
* 48 89 e5 mov %rsp,%rbp
2718
* (more pushes)
2719
* 41 52 push %r10
2720
* ...
2721
* 41 5a pop %r10
2722
* (more pops)
2723
* 5d pop %rbp
2724
* 49 8d 62 f8 lea -0x8(%r10),%rsp
2725
* c3 retq
2726
*
2727
* There are some variations in the epilogues, like:
2728
*
2729
* 5b pop %rbx
2730
* 41 5a pop %r10
2731
* 41 5c pop %r12
2732
* 41 5d pop %r13
2733
* 41 5e pop %r14
2734
* c9 leaveq
2735
* 49 8d 62 f8 lea -0x8(%r10),%rsp
2736
* c3 retq
2737
*
2738
* and:
2739
*
2740
* 4c 8b 55 e8 mov -0x18(%rbp),%r10
2741
* 48 8b 5d e0 mov -0x20(%rbp),%rbx
2742
* 4c 8b 65 f0 mov -0x10(%rbp),%r12
2743
* 4c 8b 6d f8 mov -0x8(%rbp),%r13
2744
* c9 leaveq
2745
* 49 8d 62 f8 lea -0x8(%r10),%rsp
2746
* c3 retq
2747
*
2748
* Sometimes r13 is used as the DRAP register, in which case it's saved and
2749
* restored beforehand:
2750
*
2751
* 41 55 push %r13
2752
* 4c 8d 6c 24 10 lea 0x10(%rsp),%r13
2753
* 48 83 e4 f0 and $0xfffffffffffffff0,%rsp
2754
* ...
2755
* 49 8d 65 f0 lea -0x10(%r13),%rsp
2756
* 41 5d pop %r13
2757
* c3 retq
2758
*/
2759
static int update_cfi_state(struct instruction *insn,
2760
struct instruction *next_insn,
2761
struct cfi_state *cfi, struct stack_op *op)
2762
{
2763
struct cfi_reg *cfa = &cfi->cfa;
2764
struct cfi_reg *regs = cfi->regs;
2765
2766
/* ignore UNWIND_HINT_UNDEFINED regions */
2767
if (cfi->force_undefined)
2768
return 0;
2769
2770
/* stack operations don't make sense with an undefined CFA */
2771
if (cfa->base == CFI_UNDEFINED) {
2772
if (insn_func(insn)) {
2773
WARN_INSN(insn, "undefined stack state");
2774
return 1;
2775
}
2776
return 0;
2777
}
2778
2779
if (cfi->type == UNWIND_HINT_TYPE_REGS ||
2780
cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
2781
return update_cfi_state_regs(insn, cfi, op);
2782
2783
switch (op->dest.type) {
2784
2785
case OP_DEST_REG:
2786
switch (op->src.type) {
2787
2788
case OP_SRC_REG:
2789
if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
2790
cfa->base == CFI_SP &&
2791
check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {
2792
2793
/* mov %rsp, %rbp */
2794
cfa->base = op->dest.reg;
2795
cfi->bp_scratch = false;
2796
}
2797
2798
else if (op->src.reg == CFI_SP &&
2799
op->dest.reg == CFI_BP && cfi->drap) {
2800
2801
/* drap: mov %rsp, %rbp */
2802
regs[CFI_BP].base = CFI_BP;
2803
regs[CFI_BP].offset = -cfi->stack_size;
2804
cfi->bp_scratch = false;
2805
}
2806
2807
else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
2808
2809
/*
2810
* mov %rsp, %reg
2811
*
2812
* This is needed for the rare case where GCC
2813
* does:
2814
*
2815
* mov %rsp, %rax
2816
* ...
2817
* mov %rax, %rsp
2818
*/
2819
cfi->vals[op->dest.reg].base = CFI_CFA;
2820
cfi->vals[op->dest.reg].offset = -cfi->stack_size;
2821
}
2822
2823
else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
2824
(cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {
2825
2826
/*
2827
* mov %rbp, %rsp
2828
*
2829
* Restore the original stack pointer (Clang).
2830
*/
2831
cfi->stack_size = -cfi->regs[CFI_BP].offset;
2832
}
2833
2834
else if (op->dest.reg == cfa->base) {
2835
2836
/* mov %reg, %rsp */
2837
if (cfa->base == CFI_SP &&
2838
cfi->vals[op->src.reg].base == CFI_CFA) {
2839
2840
/*
2841
* This is needed for the rare case
2842
* where GCC does something dumb like:
2843
*
2844
* lea 0x8(%rsp), %rcx
2845
* ...
2846
* mov %rcx, %rsp
2847
*/
2848
cfa->offset = -cfi->vals[op->src.reg].offset;
2849
cfi->stack_size = cfa->offset;
2850
2851
} else if (cfa->base == CFI_SP &&
2852
cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
2853
cfi->vals[op->src.reg].offset == cfa->offset) {
2854
2855
/*
2856
* Stack swizzle:
2857
*
2858
* 1: mov %rsp, (%[tos])
2859
* 2: mov %[tos], %rsp
2860
* ...
2861
* 3: pop %rsp
2862
*
2863
* Where:
2864
*
2865
* 1 - places a pointer to the previous
2866
* stack at the Top-of-Stack of the
2867
* new stack.
2868
*
2869
* 2 - switches to the new stack.
2870
*
2871
* 3 - pops the Top-of-Stack to restore
2872
* the original stack.
2873
*
2874
* Note: we set base to SP_INDIRECT
2875
* here and preserve offset. Therefore
2876
* when the unwinder reaches ToS it
2877
* will dereference SP and then add the
2878
* offset to find the next frame, IOW:
2879
* (%rsp) + offset.
2880
*/
2881
cfa->base = CFI_SP_INDIRECT;
2882
2883
} else {
2884
cfa->base = CFI_UNDEFINED;
2885
cfa->offset = 0;
2886
}
2887
}
2888
2889
else if (op->dest.reg == CFI_SP &&
2890
cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
2891
cfi->vals[op->src.reg].offset == cfa->offset) {
2892
2893
/*
2894
* The same stack swizzle case 2) as above. But
2895
* because we can't change cfa->base, case 3)
2896
* will become a regular POP. Pretend we're a
2897
* PUSH so things don't go unbalanced.
2898
*/
2899
cfi->stack_size += 8;
2900
}
2901
2902
2903
break;
2904
2905
case OP_SRC_ADD:
2906
if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {
2907
2908
/* add imm, %rsp */
2909
cfi->stack_size -= op->src.offset;
2910
if (cfa->base == CFI_SP)
2911
cfa->offset -= op->src.offset;
2912
break;
2913
}
2914
2915
if (op->dest.reg == CFI_BP && op->src.reg == CFI_SP &&
2916
insn->sym->frame_pointer) {
2917
/* addi.d fp,sp,imm on LoongArch */
2918
if (cfa->base == CFI_SP && cfa->offset == op->src.offset) {
2919
cfa->base = CFI_BP;
2920
cfa->offset = 0;
2921
}
2922
break;
2923
}
2924
2925
if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
2926
/* addi.d sp,fp,imm on LoongArch */
2927
if (cfa->base == CFI_BP && cfa->offset == 0) {
2928
if (insn->sym->frame_pointer) {
2929
cfa->base = CFI_SP;
2930
cfa->offset = -op->src.offset;
2931
}
2932
} else {
2933
/* lea disp(%rbp), %rsp */
2934
cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
2935
}
2936
break;
2937
}
2938
2939
if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
2940
2941
/* drap: lea disp(%rsp), %drap */
2942
cfi->drap_reg = op->dest.reg;
2943
2944
/*
2945
* lea disp(%rsp), %reg
2946
*
2947
* This is needed for the rare case where GCC
2948
* does something dumb like:
2949
*
2950
* lea 0x8(%rsp), %rcx
2951
* ...
2952
* mov %rcx, %rsp
2953
*/
2954
cfi->vals[op->dest.reg].base = CFI_CFA;
2955
cfi->vals[op->dest.reg].offset = \
2956
-cfi->stack_size + op->src.offset;
2957
2958
break;
2959
}
2960
2961
if (cfi->drap && op->dest.reg == CFI_SP &&
2962
op->src.reg == cfi->drap_reg) {
2963
2964
/* drap: lea disp(%drap), %rsp */
2965
cfa->base = CFI_SP;
2966
cfa->offset = cfi->stack_size = -op->src.offset;
2967
cfi->drap_reg = CFI_UNDEFINED;
2968
cfi->drap = false;
2969
break;
2970
}
2971
2972
if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
2973
WARN_INSN(insn, "unsupported stack register modification");
2974
return -1;
2975
}
2976
2977
break;
2978
2979
case OP_SRC_AND:
2980
if (op->dest.reg != CFI_SP ||
2981
(cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
2982
(cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
2983
WARN_INSN(insn, "unsupported stack pointer realignment");
2984
return -1;
2985
}
2986
2987
if (cfi->drap_reg != CFI_UNDEFINED) {
2988
/* drap: and imm, %rsp */
2989
cfa->base = cfi->drap_reg;
2990
cfa->offset = cfi->stack_size = 0;
2991
cfi->drap = true;
2992
}
2993
2994
/*
2995
* Older versions of GCC (4.8ish) realign the stack
2996
* without DRAP, with a frame pointer.
2997
*/
2998
2999
break;
3000
3001
case OP_SRC_POP:
3002
case OP_SRC_POPF:
3003
if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {
3004
3005
/* pop %rsp; # restore from a stack swizzle */
3006
cfa->base = CFI_SP;
3007
break;
3008
}
3009
3010
if (!cfi->drap && op->dest.reg == cfa->base) {
3011
3012
/* pop %rbp */
3013
cfa->base = CFI_SP;
3014
}
3015
3016
if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
3017
op->dest.reg == cfi->drap_reg &&
3018
cfi->drap_offset == -cfi->stack_size) {
3019
3020
/* drap: pop %drap */
3021
cfa->base = cfi->drap_reg;
3022
cfa->offset = 0;
3023
cfi->drap_offset = -1;
3024
3025
} else if (cfi->stack_size == -regs[op->dest.reg].offset) {
3026
3027
/* pop %reg */
3028
restore_reg(cfi, op->dest.reg);
3029
}
3030
3031
cfi->stack_size -= 8;
3032
if (cfa->base == CFI_SP)
3033
cfa->offset -= 8;
3034
3035
break;
3036
3037
case OP_SRC_REG_INDIRECT:
3038
if (!cfi->drap && op->dest.reg == cfa->base &&
3039
op->dest.reg == CFI_BP) {
3040
3041
/* mov disp(%rsp), %rbp */
3042
cfa->base = CFI_SP;
3043
cfa->offset = cfi->stack_size;
3044
}
3045
3046
if (cfi->drap && op->src.reg == CFI_BP &&
3047
op->src.offset == cfi->drap_offset) {
3048
3049
/* drap: mov disp(%rbp), %drap */
3050
cfa->base = cfi->drap_reg;
3051
cfa->offset = 0;
3052
cfi->drap_offset = -1;
3053
}
3054
3055
if (cfi->drap && op->src.reg == CFI_BP &&
3056
op->src.offset == regs[op->dest.reg].offset) {
3057
3058
/* drap: mov disp(%rbp), %reg */
3059
restore_reg(cfi, op->dest.reg);
3060
3061
} else if (op->src.reg == cfa->base &&
3062
op->src.offset == regs[op->dest.reg].offset + cfa->offset) {
3063
3064
/* mov disp(%rbp), %reg */
3065
/* mov disp(%rsp), %reg */
3066
restore_reg(cfi, op->dest.reg);
3067
3068
} else if (op->src.reg == CFI_SP &&
3069
op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {
3070
3071
/* mov disp(%rsp), %reg */
3072
restore_reg(cfi, op->dest.reg);
3073
}
3074
3075
break;
3076
3077
default:
3078
WARN_INSN(insn, "unknown stack-related instruction");
3079
return -1;
3080
}
3081
3082
break;
3083
3084
case OP_DEST_PUSH:
3085
case OP_DEST_PUSHF:
3086
cfi->stack_size += 8;
3087
if (cfa->base == CFI_SP)
3088
cfa->offset += 8;
3089
3090
if (op->src.type != OP_SRC_REG)
3091
break;
3092
3093
if (cfi->drap) {
3094
if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
3095
3096
/* drap: push %drap */
3097
cfa->base = CFI_BP_INDIRECT;
3098
cfa->offset = -cfi->stack_size;
3099
3100
/* save drap so we know when to restore it */
3101
cfi->drap_offset = -cfi->stack_size;
3102
3103
} else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {
3104
3105
/* drap: push %rbp */
3106
cfi->stack_size = 0;
3107
3108
} else {
3109
3110
/* drap: push %reg */
3111
save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
3112
}
3113
3114
} else {
3115
3116
/* push %reg */
3117
save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
3118
}
3119
3120
/* detect when asm code uses rbp as a scratch register */
3121
if (opts.stackval && insn_func(insn) && op->src.reg == CFI_BP &&
3122
cfa->base != CFI_BP)
3123
cfi->bp_scratch = true;
3124
break;
3125
3126
case OP_DEST_REG_INDIRECT:
3127
3128
if (cfi->drap) {
3129
if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
3130
3131
/* drap: mov %drap, disp(%rbp) */
3132
cfa->base = CFI_BP_INDIRECT;
3133
cfa->offset = op->dest.offset;
3134
3135
/* save drap offset so we know when to restore it */
3136
cfi->drap_offset = op->dest.offset;
3137
} else {
3138
3139
/* drap: mov reg, disp(%rbp) */
3140
save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
3141
}
3142
3143
} else if (op->dest.reg == cfa->base) {
3144
3145
/* mov reg, disp(%rbp) */
3146
/* mov reg, disp(%rsp) */
3147
save_reg(cfi, op->src.reg, CFI_CFA,
3148
op->dest.offset - cfi->cfa.offset);
3149
3150
} else if (op->dest.reg == CFI_SP) {
3151
3152
/* mov reg, disp(%rsp) */
3153
save_reg(cfi, op->src.reg, CFI_CFA,
3154
op->dest.offset - cfi->stack_size);
3155
3156
} else if (op->src.reg == CFI_SP && op->dest.offset == 0) {
3157
3158
/* mov %rsp, (%reg); # setup a stack swizzle. */
3159
cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
3160
cfi->vals[op->dest.reg].offset = cfa->offset;
3161
}
3162
3163
break;
3164
3165
case OP_DEST_MEM:
3166
if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
3167
WARN_INSN(insn, "unknown stack-related memory operation");
3168
return -1;
3169
}
3170
3171
/* pop mem */
3172
cfi->stack_size -= 8;
3173
if (cfa->base == CFI_SP)
3174
cfa->offset -= 8;
3175
3176
break;
3177
3178
default:
3179
WARN_INSN(insn, "unknown stack-related instruction");
3180
return -1;
3181
}
3182
3183
return 0;
3184
}
3185
3186
/*
3187
* The stack layouts of alternatives instructions can sometimes diverge when
3188
* they have stack modifications. That's fine as long as the potential stack
3189
* layouts don't conflict at any given potential instruction boundary.
3190
*
3191
* Flatten the CFIs of the different alternative code streams (both original
3192
* and replacement) into a single shared CFI array which can be used to detect
3193
* conflicts and nicely feed a linear array of ORC entries to the unwinder.
3194
*/
3195
static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
3196
{
3197
struct cfi_state **alt_cfi;
3198
int group_off;
3199
3200
if (!insn->alt_group)
3201
return 0;
3202
3203
if (!insn->cfi) {
3204
WARN("CFI missing");
3205
return -1;
3206
}
3207
3208
alt_cfi = insn->alt_group->cfi;
3209
group_off = insn->offset - insn->alt_group->first_insn->offset;
3210
3211
if (!alt_cfi[group_off]) {
3212
alt_cfi[group_off] = insn->cfi;
3213
} else {
3214
if (cficmp(alt_cfi[group_off], insn->cfi)) {
3215
struct alt_group *orig_group = insn->alt_group->orig_group ?: insn->alt_group;
3216
struct instruction *orig = orig_group->first_insn;
3217
WARN_INSN(orig, "stack layout conflict in alternatives: %s",
3218
offstr(insn->sec, insn->offset));
3219
return -1;
3220
}
3221
}
3222
3223
return 0;
3224
}
3225
3226
static int handle_insn_ops(struct instruction *insn,
3227
struct instruction *next_insn,
3228
struct insn_state *state)
3229
{
3230
struct stack_op *op;
3231
int ret;
3232
3233
for (op = insn->stack_ops; op; op = op->next) {
3234
3235
ret = update_cfi_state(insn, next_insn, &state->cfi, op);
3236
if (ret)
3237
return ret;
3238
3239
if (!opts.uaccess || !insn->alt_group)
3240
continue;
3241
3242
if (op->dest.type == OP_DEST_PUSHF) {
3243
if (!state->uaccess_stack) {
3244
state->uaccess_stack = 1;
3245
} else if (state->uaccess_stack >> 31) {
3246
WARN_INSN(insn, "PUSHF stack exhausted");
3247
return 1;
3248
}
3249
state->uaccess_stack <<= 1;
3250
state->uaccess_stack |= state->uaccess;
3251
}
3252
3253
if (op->src.type == OP_SRC_POPF) {
3254
if (state->uaccess_stack) {
3255
state->uaccess = state->uaccess_stack & 1;
3256
state->uaccess_stack >>= 1;
3257
if (state->uaccess_stack == 1)
3258
state->uaccess_stack = 0;
3259
}
3260
}
3261
}
3262
3263
return 0;
3264
}
3265
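/*
* The uaccess_stack above is a bit-stack with a sentinel bit. A short
* trace (sketch), starting with state.uaccess == 1 and an empty stack:
*
* PUSHF: stack = 0b1 (sentinel)
* stack = (0b1 << 1) | 1 = 0b11
* POPF: uaccess = 0b11 & 1 = 1
* stack = 0b11 >> 1 = 0b1 (sentinel) -> stack = 0
*
* so each POPF restores whatever AC state the matching PUSHF saved.
*/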
3266
static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
3267
{
3268
struct cfi_state *cfi1 = insn->cfi;
3269
int i;
3270
3271
if (!cfi1) {
3272
WARN("CFI missing");
3273
return false;
3274
}
3275
3276
if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {
3277
3278
WARN_INSN(insn, "stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
3279
cfi1->cfa.base, cfi1->cfa.offset,
3280
cfi2->cfa.base, cfi2->cfa.offset);
3281
return false;
3282
3283
}
3284
3285
if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
3286
for (i = 0; i < CFI_NUM_REGS; i++) {
3287
3288
if (!memcmp(&cfi1->regs[i], &cfi2->regs[i], sizeof(struct cfi_reg)))
3289
continue;
3290
3291
WARN_INSN(insn, "stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
3292
i, cfi1->regs[i].base, cfi1->regs[i].offset,
3293
i, cfi2->regs[i].base, cfi2->regs[i].offset);
3294
}
3295
return false;
3296
}
3297
3298
if (cfi1->type != cfi2->type) {
3299
3300
WARN_INSN(insn, "stack state mismatch: type1=%d type2=%d",
3301
cfi1->type, cfi2->type);
3302
return false;
3303
}
3304
3305
if (cfi1->drap != cfi2->drap ||
3306
(cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
3307
(cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {
3308
3309
WARN_INSN(insn, "stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
3310
cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
3311
cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);
3312
return false;
3313
}
3314
3315
return true;
3316
}
3317
3318
static inline bool func_uaccess_safe(struct symbol *func)
3319
{
3320
if (func)
3321
return func->uaccess_safe;
3322
3323
return false;
3324
}
3325
3326
static inline const char *call_dest_name(struct instruction *insn)
3327
{
3328
static char pvname[19];
3329
struct reloc *reloc;
3330
int idx;
3331
3332
if (insn_call_dest(insn))
3333
return insn_call_dest(insn)->name;
3334
3335
reloc = insn_reloc(NULL, insn);
3336
if (reloc && !strcmp(reloc->sym->name, "pv_ops")) {
3337
idx = (reloc_addend(reloc) / sizeof(void *));
3338
snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx);
3339
return pvname;
3340
}
3341
3342
return "{dynamic}";
3343
}
3344
3345
static bool pv_call_dest(struct objtool_file *file, struct instruction *insn)
3346
{
3347
struct symbol *target;
3348
struct reloc *reloc;
3349
int idx;
3350
3351
reloc = insn_reloc(file, insn);
3352
if (!reloc || strcmp(reloc->sym->name, "pv_ops"))
3353
return false;
3354
3355
idx = (arch_dest_reloc_offset(reloc_addend(reloc)) / sizeof(void *));
3356
3357
if (file->pv_ops[idx].clean)
3358
return true;
3359
3360
file->pv_ops[idx].clean = true;
3361
3362
list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) {
3363
if (!target->sec->noinstr) {
3364
WARN("pv_ops[%d]: %s", idx, target->name);
3365
file->pv_ops[idx].clean = false;
3366
}
3367
}
3368
3369
return file->pv_ops[idx].clean;
3370
}
3371
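/*
* Example of the index math above, assuming 8-byte pointers: a call
* relocated against pv_ops with an effective offset of 0x28 resolves
* to pv_ops[0x28 / 8] == pv_ops[5], reported as "pv_ops[5]" by
* call_dest_name().
*/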
3372
static inline bool noinstr_call_dest(struct objtool_file *file,
3373
struct instruction *insn,
3374
struct symbol *func)
3375
{
3376
/*
3377
* We can't deal with indirect function calls at present;
3378
* assume they're instrumented.
3379
*/
3380
if (!func) {
3381
if (file->pv_ops)
3382
return pv_call_dest(file, insn);
3383
3384
return false;
3385
}
3386
3387
/*
3388
* If the symbol is from a noinstr section, we're good.
3389
*/
3390
if (func->sec->noinstr)
3391
return true;
3392
3393
/*
3394
* If the symbol is a static_call trampoline, we can't tell.
3395
*/
3396
if (func->static_call_tramp)
3397
return true;
3398
3399
/*
3400
* The __ubsan_handle_*() calls are like WARN(): they only happen when
3401
* something 'BAD' happened. At the risk of taking the machine down,
3402
* let them proceed to get the message out.
3403
*/
3404
if (!strncmp(func->name, "__ubsan_handle_", 15))
3405
return true;
3406
3407
return false;
3408
}
3409
3410
static int validate_call(struct objtool_file *file,
3411
struct instruction *insn,
3412
struct insn_state *state)
3413
{
3414
if (state->noinstr && state->instr <= 0 &&
3415
!noinstr_call_dest(file, insn, insn_call_dest(insn))) {
3416
WARN_INSN(insn, "call to %s() leaves .noinstr.text section", call_dest_name(insn));
3417
return 1;
3418
}
3419
3420
if (state->uaccess && !func_uaccess_safe(insn_call_dest(insn))) {
3421
WARN_INSN(insn, "call to %s() with UACCESS enabled", call_dest_name(insn));
3422
return 1;
3423
}
3424
3425
if (state->df) {
3426
WARN_INSN(insn, "call to %s() with DF set", call_dest_name(insn));
3427
return 1;
3428
}
3429
3430
return 0;
3431
}
3432
3433
static int validate_sibling_call(struct objtool_file *file,
3434
struct instruction *insn,
3435
struct insn_state *state)
3436
{
3437
if (insn_func(insn) && has_modified_stack_frame(insn, state)) {
3438
WARN_INSN(insn, "sibling call from callable instruction with modified stack frame");
3439
return 1;
3440
}
3441
3442
return validate_call(file, insn, state);
3443
}
3444
3445
static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
3446
{
3447
if (state->noinstr && state->instr > 0) {
3448
WARN_INSN(insn, "return with instrumentation enabled");
3449
return 1;
3450
}
3451
3452
if (state->uaccess && !func_uaccess_safe(func)) {
3453
WARN_INSN(insn, "return with UACCESS enabled");
3454
return 1;
3455
}
3456
3457
if (!state->uaccess && func_uaccess_safe(func)) {
3458
WARN_INSN(insn, "return with UACCESS disabled from a UACCESS-safe function");
3459
return 1;
3460
}
3461
3462
if (state->df) {
3463
WARN_INSN(insn, "return with DF set");
3464
return 1;
3465
}
3466
3467
if (func && has_modified_stack_frame(insn, state)) {
3468
WARN_INSN(insn, "return with modified stack frame");
3469
return 1;
3470
}
3471
3472
if (state->cfi.bp_scratch) {
3473
WARN_INSN(insn, "BP used as a scratch register");
3474
return 1;
3475
}
3476
3477
return 0;
3478
}
3479
3480
static struct instruction *next_insn_to_validate(struct objtool_file *file,
3481
struct instruction *insn)
3482
{
3483
struct alt_group *alt_group = insn->alt_group;
3484
3485
/*
3486
* Simulate the fact that alternatives are patched in-place. When the
3487
* end of a replacement alt_group is reached, redirect objtool flow to
3488
* the end of the original alt_group.
3489
*
3490
* insn->alts->insn -> alt_group->first_insn
3491
* ...
3492
* alt_group->last_insn
3493
* [alt_group->nop] -> next(orig_group->last_insn)
3494
*/
3495
if (alt_group) {
3496
if (alt_group->nop) {
3497
/* ->nop implies ->orig_group */
3498
if (insn == alt_group->last_insn)
3499
return alt_group->nop;
3500
if (insn == alt_group->nop)
3501
goto next_orig;
3502
}
3503
if (insn == alt_group->last_insn && alt_group->orig_group)
3504
goto next_orig;
3505
}
3506
3507
return next_insn_same_sec(file, insn);
3508
3509
next_orig:
3510
return next_insn_same_sec(file, alt_group->orig_group->last_insn);
3511
}
3512
3513
static bool skip_alt_group(struct instruction *insn)
3514
{
3515
struct instruction *alt_insn = insn->alts ? insn->alts->insn : NULL;
3516
3517
/* ANNOTATE_IGNORE_ALTERNATIVE */
3518
if (insn->alt_group && insn->alt_group->ignore)
3519
return true;
3520
3521
/*
3522
* For NOP patched with CLAC/STAC, only follow the latter to avoid
3523
* impossible code paths combining patched CLAC with unpatched STAC
3524
* or vice versa.
3525
*
3526
* ANNOTATE_IGNORE_ALTERNATIVE could have been used here, but Linus
3527
* requested not to do that to avoid hurting .s file readability
3528
* around CLAC/STAC alternative sites.
3529
*/
3530
3531
if (!alt_insn)
3532
return false;
3533
3534
/* Don't override ASM_{CLAC,STAC}_UNSAFE */
3535
if (alt_insn->alt_group && alt_insn->alt_group->ignore)
3536
return false;
3537
3538
return alt_insn->type == INSN_CLAC || alt_insn->type == INSN_STAC;
3539
}
3540
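/*
* The pattern in question, sketched: on x86, ASM_STAC/ASM_CLAC expand
* to something like
*
* ALTERNATIVE("", "stac", X86_FEATURE_SMAP)
*
* i.e. a NOP that may be patched to STAC/CLAC at boot. Following only
* the STAC/CLAC side keeps the AC-flag tracking consistent.
*/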
3541
/*
3542
* Follow the branch starting at the given instruction, and recursively follow
3543
* any other branches (jumps). Meanwhile, track the frame pointer state at
3544
* each instruction and validate all the rules described in
3545
* tools/objtool/Documentation/objtool.txt.
3546
*/
3547
static int validate_branch(struct objtool_file *file, struct symbol *func,
3548
struct instruction *insn, struct insn_state state)
3549
{
3550
struct alternative *alt;
3551
struct instruction *next_insn, *prev_insn = NULL;
3552
struct section *sec;
3553
u8 visited;
3554
int ret;
3555
3556
if (func && func->ignore)
3557
return 0;
3558
3559
sec = insn->sec;
3560
3561
while (1) {
3562
next_insn = next_insn_to_validate(file, insn);
3563
3564
if (func && insn_func(insn) && func != insn_func(insn)->pfunc) {
3565
/* Ignore KCFI type preambles, which always fall through */
3566
if (!strncmp(func->name, "__cfi_", 6) ||
3567
!strncmp(func->name, "__pfx_", 6))
3568
return 0;
3569
3570
if (file->ignore_unreachables)
3571
return 0;
3572
3573
WARN("%s() falls through to next function %s()",
3574
func->name, insn_func(insn)->name);
3575
func->warned = 1;
3576
3577
return 1;
3578
}
3579
3580
visited = VISITED_BRANCH << state.uaccess;
3581
if (insn->visited & VISITED_BRANCH_MASK) {
3582
if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
3583
return 1;
3584
3585
if (insn->visited & visited)
3586
return 0;
3587
} else {
3588
nr_insns_visited++;
3589
}
3590
3591
if (state.noinstr)
3592
state.instr += insn->instr;
3593
3594
if (insn->hint) {
3595
if (insn->restore) {
3596
struct instruction *save_insn, *i;
3597
3598
i = insn;
3599
save_insn = NULL;
3600
3601
sym_for_each_insn_continue_reverse(file, func, i) {
3602
if (i->save) {
3603
save_insn = i;
3604
break;
3605
}
3606
}
3607
3608
if (!save_insn) {
3609
WARN_INSN(insn, "no corresponding CFI save for CFI restore");
3610
return 1;
3611
}
3612
3613
if (!save_insn->visited) {
3614
/*
3615
* If the restore hint insn is at the
3616
* beginning of a basic block and was
3617
* branched to from elsewhere, and the
3618
* save insn hasn't been visited yet,
3619
* defer following this branch for now.
3620
* It will be seen later via the
3621
* straight-line path.
3622
*/
3623
if (!prev_insn)
3624
return 0;
3625
3626
WARN_INSN(insn, "objtool isn't smart enough to handle this CFI save/restore combo");
3627
return 1;
3628
}
3629
3630
insn->cfi = save_insn->cfi;
3631
nr_cfi_reused++;
3632
}
3633
3634
state.cfi = *insn->cfi;
3635
} else {
3636
/* XXX track if we actually changed state.cfi */
3637
3638
if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) {
3639
insn->cfi = prev_insn->cfi;
3640
nr_cfi_reused++;
3641
} else {
3642
insn->cfi = cfi_hash_find_or_add(&state.cfi);
3643
}
3644
}
3645
3646
insn->visited |= visited;
3647
3648
if (propagate_alt_cfi(file, insn))
3649
return 1;
3650
3651
if (insn->alts) {
3652
for (alt = insn->alts; alt; alt = alt->next) {
3653
ret = validate_branch(file, func, alt->insn, state);
3654
if (ret) {
3655
BT_INSN(insn, "(alt)");
3656
return ret;
3657
}
3658
}
3659
}
3660
3661
if (skip_alt_group(insn))
3662
return 0;
3663
3664
if (handle_insn_ops(insn, next_insn, &state))
3665
return 1;
3666
3667
switch (insn->type) {
3668
3669
case INSN_RETURN:
3670
return validate_return(func, insn, &state);
3671
3672
case INSN_CALL:
3673
case INSN_CALL_DYNAMIC:
3674
ret = validate_call(file, insn, &state);
3675
if (ret)
3676
return ret;
3677
3678
if (opts.stackval && func && !is_special_call(insn) &&
3679
!has_valid_stack_frame(&state)) {
3680
WARN_INSN(insn, "call without frame pointer save/setup");
3681
return 1;
3682
}
3683
3684
break;
3685
3686
case INSN_JUMP_CONDITIONAL:
3687
case INSN_JUMP_UNCONDITIONAL:
3688
if (is_sibling_call(insn)) {
3689
ret = validate_sibling_call(file, insn, &state);
3690
if (ret)
3691
return ret;
3692
3693
} else if (insn->jump_dest) {
3694
ret = validate_branch(file, func,
3695
insn->jump_dest, state);
3696
if (ret) {
3697
BT_INSN(insn, "(branch)");
3698
return ret;
3699
}
3700
}
3701
3702
if (insn->type == INSN_JUMP_UNCONDITIONAL)
3703
return 0;
3704
3705
break;
3706
3707
case INSN_JUMP_DYNAMIC:
3708
case INSN_JUMP_DYNAMIC_CONDITIONAL:
3709
if (is_sibling_call(insn)) {
3710
ret = validate_sibling_call(file, insn, &state);
3711
if (ret)
3712
return ret;
3713
}
3714
3715
if (insn->type == INSN_JUMP_DYNAMIC)
3716
return 0;
3717
3718
break;
3719
3720
case INSN_SYSCALL:
3721
if (func && (!next_insn || !next_insn->hint)) {
3722
WARN_INSN(insn, "unsupported instruction in callable function");
3723
return 1;
3724
}
3725
3726
break;
3727
3728
case INSN_SYSRET:
3729
if (func && (!next_insn || !next_insn->hint)) {
3730
WARN_INSN(insn, "unsupported instruction in callable function");
3731
return 1;
3732
}
3733
3734
return 0;
3735
3736
case INSN_STAC:
3737
if (!opts.uaccess)
3738
break;
3739
3740
if (state.uaccess) {
3741
WARN_INSN(insn, "recursive UACCESS enable");
3742
return 1;
3743
}
3744
3745
state.uaccess = true;
3746
break;
3747
3748
case INSN_CLAC:
3749
if (!opts.uaccess)
3750
break;
3751
3752
if (!state.uaccess && func) {
3753
WARN_INSN(insn, "redundant UACCESS disable");
3754
return 1;
3755
}
3756
3757
if (func_uaccess_safe(func) && !state.uaccess_stack) {
3758
WARN_INSN(insn, "UACCESS-safe disables UACCESS");
3759
return 1;
3760
}
3761
3762
state.uaccess = false;
3763
break;
3764
3765
case INSN_STD:
3766
if (state.df) {
3767
WARN_INSN(insn, "recursive STD");
3768
return 1;
3769
}
3770
3771
state.df = true;
3772
break;
3773
3774
case INSN_CLD:
3775
if (!state.df && func) {
3776
WARN_INSN(insn, "redundant CLD");
3777
return 1;
3778
}
3779
3780
state.df = false;
3781
break;
3782
3783
default:
3784
break;
3785
}
3786
3787
if (insn->dead_end)
3788
return 0;
3789
3790
if (!next_insn) {
3791
if (state.cfi.cfa.base == CFI_UNDEFINED)
3792
return 0;
3793
if (file->ignore_unreachables)
3794
return 0;
3795
3796
WARN("%s%sunexpected end of section %s",
3797
func ? func->name : "", func ? "(): " : "",
3798
sec->name);
3799
return 1;
3800
}
3801
3802
prev_insn = insn;
3803
insn = next_insn;
3804
}
3805
3806
return 0;
3807
}
3808
3809
static int validate_unwind_hint(struct objtool_file *file,
3810
struct instruction *insn,
3811
struct insn_state *state)
3812
{
3813
if (insn->hint && !insn->visited) {
3814
int ret = validate_branch(file, insn_func(insn), insn, *state);
3815
if (ret)
3816
BT_INSN(insn, "<=== (hint)");
3817
return ret;
3818
}
3819
3820
return 0;
3821
}
3822
3823
static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
3824
{
3825
struct instruction *insn;
3826
struct insn_state state;
3827
int warnings = 0;
3828
3829
if (!file->hints)
3830
return 0;
3831
3832
init_insn_state(file, &state, sec);
3833
3834
if (sec) {
3835
sec_for_each_insn(file, sec, insn)
3836
warnings += validate_unwind_hint(file, insn, &state);
3837
} else {
3838
for_each_insn(file, insn)
3839
warnings += validate_unwind_hint(file, insn, &state);
3840
}
3841
3842
return warnings;
3843
}
3844
3845
/*
3846
* Validate rethunk entry constraint: must untrain RET before the first RET.
3847
*
3848
* Follow every branch (intra-function) and ensure VALIDATE_UNRET_END comes
3849
* before an actual RET instruction.
3850
*/
3851
static int validate_unret(struct objtool_file *file, struct instruction *insn)
3852
{
3853
struct instruction *next, *dest;
3854
int ret;
3855
3856
for (;;) {
3857
next = next_insn_to_validate(file, insn);
3858
3859
if (insn->visited & VISITED_UNRET)
3860
return 0;
3861
3862
insn->visited |= VISITED_UNRET;
3863
3864
if (insn->alts) {
3865
struct alternative *alt;
3866
for (alt = insn->alts; alt; alt = alt->next) {
3867
ret = validate_unret(file, alt->insn);
3868
if (ret) {
3869
BT_INSN(insn, "(alt)");
3870
return ret;
3871
}
3872
}
3873
}
3874
3875
switch (insn->type) {
3876
3877
case INSN_CALL_DYNAMIC:
3878
case INSN_JUMP_DYNAMIC:
3879
case INSN_JUMP_DYNAMIC_CONDITIONAL:
3880
WARN_INSN(insn, "early indirect call");
3881
return 1;
3882
3883
case INSN_JUMP_UNCONDITIONAL:
3884
case INSN_JUMP_CONDITIONAL:
3885
if (!is_sibling_call(insn)) {
3886
if (!insn->jump_dest) {
3887
WARN_INSN(insn, "unresolved jump target after linking?!?");
3888
return 1;
3889
}
3890
ret = validate_unret(file, insn->jump_dest);
3891
if (ret) {
3892
BT_INSN(insn, "(branch%s)",
3893
insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : "");
3894
return ret;
3895
}
3896
3897
if (insn->type == INSN_JUMP_UNCONDITIONAL)
3898
return 0;
3899
3900
break;
3901
}
3902
3903
/* fallthrough */
3904
case INSN_CALL:
3905
dest = find_insn(file, insn_call_dest(insn)->sec,
3906
insn_call_dest(insn)->offset);
3907
if (!dest) {
3908
WARN("Unresolved function after linking!?: %s",
3909
insn_call_dest(insn)->name);
3910
return 1;
3911
}
3912
3913
ret = validate_unret(file, dest);
3914
if (ret) {
3915
BT_INSN(insn, "(call)");
3916
return ret;
3917
}
3918
/*
3919
* If a call returns without error, it must have seen UNTRAIN_RET.
3920
* Therefore any non-error return is a success.
3921
*/
3922
return 0;
3923
3924
case INSN_RETURN:
3925
WARN_INSN(insn, "RET before UNTRAIN");
3926
return 1;
3927
3928
case INSN_SYSCALL:
3929
break;
3930
3931
case INSN_SYSRET:
3932
return 0;
3933
3934
case INSN_NOP:
3935
if (insn->retpoline_safe)
3936
return 0;
3937
break;
3938
3939
default:
3940
break;
3941
}
3942
3943
if (insn->dead_end)
3944
return 0;
3945
3946
if (!next) {
3947
WARN_INSN(insn, "teh end!");
3948
return 1;
3949
}
3950
insn = next;
3951
}
3952
3953
return 0;
3954
}
3955
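/*
* Sketch of what the above enforces: code reached from an UNRET entry
* point must run the untraining sequence before its first RET:
*
* entry:
* UNTRAIN_RET <- contains VALIDATE_UNRET_END
* ...
* ret <- OK, untrained
*
* whereas a RET (or an early indirect branch) reachable before that
* annotation is flagged.
*/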
3956
/*
3957
* Validate that all branches starting at VALIDATE_UNRET_BEGIN encounter
3958
* VALIDATE_UNRET_END before RET.
3959
*/
3960
static int validate_unrets(struct objtool_file *file)
3961
{
3962
struct instruction *insn;
3963
int warnings = 0;
3964
3965
for_each_insn(file, insn) {
3966
if (!insn->unret)
3967
continue;
3968
3969
warnings += validate_unret(file, insn);
3970
}
3971
3972
return warnings;
3973
}
3974
3975
static int validate_retpoline(struct objtool_file *file)
3976
{
3977
struct instruction *insn;
3978
int warnings = 0;
3979
3980
for_each_insn(file, insn) {
3981
if (insn->type != INSN_JUMP_DYNAMIC &&
3982
insn->type != INSN_CALL_DYNAMIC &&
3983
insn->type != INSN_RETURN)
3984
continue;
3985
3986
if (insn->retpoline_safe)
3987
continue;
3988
3989
if (insn->sec->init)
3990
continue;
3991
3992
if (insn->type == INSN_RETURN) {
3993
if (opts.rethunk) {
3994
WARN_INSN(insn, "'naked' return found in MITIGATION_RETHUNK build");
3995
warnings++;
3996
}
3997
continue;
3998
}
3999
4000
WARN_INSN(insn, "indirect %s found in MITIGATION_RETPOLINE build",
4001
insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
4002
warnings++;
4003
}
4004
4005
return warnings;
4006
}
4007
4008
static bool is_kasan_insn(struct instruction *insn)
4009
{
4010
return (insn->type == INSN_CALL &&
4011
!strcmp(insn_call_dest(insn)->name, "__asan_handle_no_return"));
4012
}
4013
4014
static bool is_ubsan_insn(struct instruction *insn)
4015
{
4016
return (insn->type == INSN_CALL &&
4017
!strcmp(insn_call_dest(insn)->name,
4018
"__ubsan_handle_builtin_unreachable"));
4019
}
4020
4021
static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
4022
{
4023
struct symbol *func = insn_func(insn);
4024
struct instruction *prev_insn;
4025
int i;
4026
4027
if (insn->type == INSN_NOP || insn->type == INSN_TRAP || (func && func->ignore))
4028
return true;
4029
4030
/*
4031
* Ignore alternative replacement instructions. This can happen
4032
* when a whitelisted function uses one of the ALTERNATIVE macros.
4033
*/
4034
if (!strcmp(insn->sec->name, ".altinstr_replacement") ||
4035
!strcmp(insn->sec->name, ".altinstr_aux"))
4036
return true;
4037
4038
/*
4039
* Whole archive runs might encounter dead code from weak symbols.
4040
* This is where the linker will have dropped the weak symbol in
4041
* favour of a regular symbol, but leaves the code in place.
4042
*
4043
* In this case we'll find a piece of code (whole function) that is not
4044
* covered by a !section symbol. Ignore them.
4045
*/
4046
if (opts.link && !func) {
4047
int size = find_symbol_hole_containing(insn->sec, insn->offset);
4048
unsigned long end = insn->offset + size;
4049
4050
if (!size) /* not a hole */
4051
return false;
4052
4053
if (size < 0) /* hole until the end */
4054
return true;
4055
4056
sec_for_each_insn_continue(file, insn) {
4057
/*
4058
* If we reach a visited instruction at or before the
4059
* end of the hole, ignore the unreachable.
4060
*/
4061
if (insn->visited)
4062
return true;
4063
4064
if (insn->offset >= end)
4065
break;
4066
4067
/*
4068
* If this hole jumps to a .cold function, mark it ignore too.
4069
*/
4070
if (insn->jump_dest && insn_func(insn->jump_dest) &&
4071
strstr(insn_func(insn->jump_dest)->name, ".cold")) {
4072
insn_func(insn->jump_dest)->ignore = true;
4073
}
4074
}
4075
4076
return false;
4077
}
4078
4079
if (!func)
4080
return false;
4081
4082
if (func->static_call_tramp)
4083
return true;
4084
4085
/*
4086
* CONFIG_UBSAN_TRAP inserts a UD2 when it sees
4087
* __builtin_unreachable(). The BUG() macro has an unreachable() after
4088
* the UD2, which causes GCC's undefined trap logic to emit another UD2
4089
* (or occasionally a JMP to UD2).
4090
*
4091
* It may also insert a UD2 after calling a __noreturn function.
4092
*/
4093
prev_insn = prev_insn_same_sec(file, insn);
4094
if (prev_insn && prev_insn->dead_end &&
4095
(insn->type == INSN_BUG ||
4096
(insn->type == INSN_JUMP_UNCONDITIONAL &&
4097
insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
4098
return true;
4099
4100
/*
4101
* Check if this (or a subsequent) instruction is related to
4102
* CONFIG_UBSAN or CONFIG_KASAN.
4103
*
4104
* End the search at 5 instructions to avoid going into the weeds.
4105
*/
4106
for (i = 0; i < 5; i++) {
4107
4108
if (is_kasan_insn(insn) || is_ubsan_insn(insn))
4109
return true;
4110
4111
if (insn->type == INSN_JUMP_UNCONDITIONAL) {
4112
if (insn->jump_dest &&
4113
insn_func(insn->jump_dest) == func) {
4114
insn = insn->jump_dest;
4115
continue;
4116
}
4117
4118
break;
4119
}
4120
4121
if (insn->offset + insn->len >= func->offset + func->len)
4122
break;
4123
4124
insn = next_insn_same_sec(file, insn);
4125
}
4126
4127
return false;
4128
}
4129
4130
static int add_prefix_symbol(struct objtool_file *file, struct symbol *func)
4131
{
4132
struct instruction *insn, *prev;
4133
struct cfi_state *cfi;
4134
4135
insn = find_insn(file, func->sec, func->offset);
4136
if (!insn)
4137
return -1;
4138
4139
for (prev = prev_insn_same_sec(file, insn);
4140
prev;
4141
prev = prev_insn_same_sec(file, prev)) {
4142
u64 offset;
4143
4144
if (prev->type != INSN_NOP)
4145
return -1;
4146
4147
offset = func->offset - prev->offset;
4148
4149
if (offset > opts.prefix)
4150
return -1;
4151
4152
if (offset < opts.prefix)
4153
continue;
4154
4155
elf_create_prefix_symbol(file->elf, func, opts.prefix);
4156
break;
4157
}
4158
4159
if (!prev)
4160
return -1;
4161
4162
if (!insn->cfi) {
4163
/*
4164
* This can happen if stack validation isn't enabled or the
4165
* function is annotated with STACK_FRAME_NON_STANDARD.
4166
*/
4167
return 0;
4168
}
4169
4170
/* Propagate insn->cfi to the prefix code */
4171
cfi = cfi_hash_find_or_add(insn->cfi);
4172
for (; prev != insn; prev = next_insn_same_sec(file, prev))
4173
prev->cfi = cfi;
4174
4175
return 0;
4176
}
4177
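/*
* Layout handled above (sketch, e.g. with opts.prefix == 16):
*
* __pfx_func: <- symbol created by elf_create_prefix_symbol()
* nop; nop; ... <- 16 bytes of padding NOPs
* func: <- actual function entry
*
* The padding typically comes from -fpatchable-function-entry; the
* prefix symbol makes it visible to tooling, and the NOPs inherit the
* CFI of the function's first instruction.
*/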
4178
static int add_prefix_symbols(struct objtool_file *file)
4179
{
4180
struct section *sec;
4181
struct symbol *func;
4182
4183
for_each_sec(file, sec) {
4184
if (!(sec->sh.sh_flags & SHF_EXECINSTR))
4185
continue;
4186
4187
sec_for_each_sym(sec, func) {
4188
if (func->type != STT_FUNC)
4189
continue;
4190
4191
add_prefix_symbol(file, func);
4192
}
4193
}
4194
4195
return 0;
4196
}
4197
4198
static int validate_symbol(struct objtool_file *file, struct section *sec,
4199
struct symbol *sym, struct insn_state *state)
4200
{
4201
struct instruction *insn;
4202
int ret;
4203
4204
if (!sym->len) {
4205
WARN("%s() is missing an ELF size annotation", sym->name);
4206
return 1;
4207
}
4208
4209
if (sym->pfunc != sym || sym->alias != sym)
4210
return 0;
4211
4212
insn = find_insn(file, sec, sym->offset);
4213
if (!insn || insn->visited)
4214
return 0;
4215
4216
if (opts.uaccess)
4217
state->uaccess = sym->uaccess_safe;
4218
4219
ret = validate_branch(file, insn_func(insn), insn, *state);
4220
if (ret)
4221
BT_INSN(insn, "<=== (sym)");
4222
return ret;
4223
}
4224
4225
static int validate_section(struct objtool_file *file, struct section *sec)
4226
{
4227
struct insn_state state;
4228
struct symbol *func;
4229
int warnings = 0;
4230
4231
sec_for_each_sym(sec, func) {
4232
if (func->type != STT_FUNC)
4233
continue;
4234
4235
init_insn_state(file, &state, sec);
4236
set_func_state(&state.cfi);
4237
4238
warnings += validate_symbol(file, sec, func, &state);
4239
}
4240
4241
return warnings;
4242
}
4243
4244
static int validate_noinstr_sections(struct objtool_file *file)
4245
{
4246
struct section *sec;
4247
int warnings = 0;
4248
4249
sec = find_section_by_name(file->elf, ".noinstr.text");
4250
if (sec) {
4251
warnings += validate_section(file, sec);
4252
warnings += validate_unwind_hints(file, sec);
4253
}
4254
4255
sec = find_section_by_name(file->elf, ".entry.text");
4256
if (sec) {
4257
warnings += validate_section(file, sec);
4258
warnings += validate_unwind_hints(file, sec);
4259
}
4260
4261
sec = find_section_by_name(file->elf, ".cpuidle.text");
4262
if (sec) {
4263
warnings += validate_section(file, sec);
4264
warnings += validate_unwind_hints(file, sec);
4265
}
4266
4267
return warnings;
4268
}
4269
4270
static int validate_functions(struct objtool_file *file)
4271
{
4272
struct section *sec;
4273
int warnings = 0;
4274
4275
for_each_sec(file, sec) {
4276
if (!(sec->sh.sh_flags & SHF_EXECINSTR))
4277
continue;
4278
4279
warnings += validate_section(file, sec);
4280
}
4281
4282
return warnings;
4283
}
4284
4285
static void mark_endbr_used(struct instruction *insn)
4286
{
4287
if (!list_empty(&insn->call_node))
4288
list_del_init(&insn->call_node);
4289
}
4290
4291
static bool noendbr_range(struct objtool_file *file, struct instruction *insn)
4292
{
4293
struct symbol *sym = find_symbol_containing(insn->sec, insn->offset-1);
4294
struct instruction *first;
4295
4296
if (!sym)
4297
return false;
4298
4299
first = find_insn(file, sym->sec, sym->offset);
4300
if (!first)
4301
return false;
4302
4303
if (first->type != INSN_ENDBR && !first->noendbr)
4304
return false;
4305
4306
return insn->offset == sym->offset + sym->len;
4307
}

static int __validate_ibt_insn(struct objtool_file *file, struct instruction *insn,
			       struct instruction *dest)
{
	if (dest->type == INSN_ENDBR) {
		mark_endbr_used(dest);
		return 0;
	}

	if (insn_func(dest) && insn_func(insn) &&
	    insn_func(dest)->pfunc == insn_func(insn)->pfunc) {
		/*
		 * Anything from->to self is either _THIS_IP_ or
		 * IRET-to-self.
		 *
		 * There is no sane way to annotate _THIS_IP_ since the
		 * compiler treats the relocation as a constant and is
		 * happy to fold in offsets, skewing any annotation we
		 * do, leading to vast amounts of false-positives.
		 *
		 * There's also compiler generated _THIS_IP_ through
		 * KCOV and such which we have no hope of annotating.
		 *
		 * As such, blanket accept self-references without
		 * issue.
		 */
		return 0;
	}

	/*
	 * Accept anything ANNOTATE_NOENDBR.
	 */
	if (dest->noendbr)
		return 0;

	/*
	 * Accept if this is the instruction after a symbol
	 * that is (no)endbr -- typical code-range usage.
	 */
	if (noendbr_range(file, dest))
		return 0;

	WARN_INSN(insn, "relocation to !ENDBR: %s", offstr(dest->sec, dest->offset));
	return 1;
}
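
/*
 * For reference, the noendbr annotation accepted above is typically applied
 * in asm to code whose address is taken but which is never reached via an
 * indirect branch, along these lines (placeholder name):
 *
 *	SYM_CODE_START(my_asm_stub)
 *		ANNOTATE_NOENDBR
 *		...
 *	SYM_CODE_END(my_asm_stub)
 */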

static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn)
{
	struct instruction *dest;
	struct reloc *reloc;
	unsigned long off;
	int warnings = 0;

	/*
	 * Looking for function pointer load relocations.  Ignore
	 * direct/indirect branches:
	 */
	switch (insn->type) {

	case INSN_CALL:
	case INSN_CALL_DYNAMIC:
	case INSN_JUMP_CONDITIONAL:
	case INSN_JUMP_UNCONDITIONAL:
	case INSN_JUMP_DYNAMIC:
	case INSN_JUMP_DYNAMIC_CONDITIONAL:
	case INSN_RETURN:
	case INSN_NOP:
		return 0;

	case INSN_LEA_RIP:
		if (!insn_reloc(file, insn)) {
			/* local function pointer reference without reloc */

			off = arch_jump_destination(insn);

			dest = find_insn(file, insn->sec, off);
			if (!dest) {
				WARN_INSN(insn, "corrupt function pointer reference");
				return 1;
			}

			return __validate_ibt_insn(file, insn, dest);
		}
		break;

	default:
		break;
	}

	for (reloc = insn_reloc(file, insn);
	     reloc;
	     reloc = find_reloc_by_dest_range(file->elf, insn->sec,
					      reloc_offset(reloc) + 1,
					      (insn->offset + insn->len) - (reloc_offset(reloc) + 1))) {

		off = reloc->sym->offset;
		if (reloc_type(reloc) == R_X86_64_PC32 ||
		    reloc_type(reloc) == R_X86_64_PLT32)
			off += arch_dest_reloc_offset(reloc_addend(reloc));
		else
			off += reloc_addend(reloc);

		dest = find_insn(file, reloc->sym->sec, off);
		if (!dest)
			continue;

		warnings += __validate_ibt_insn(file, insn, dest);
	}

	return warnings;
}
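
/*
 * Sketch of the pattern hunted above, with a placeholder name: taking a
 * function's address in C,
 *
 *	void (*fp)(void) = my_func;
 *
 * typically compiles to a RIP-relative "lea my_func(%rip), %reg" carrying a
 * PC32/PLT32 relocation.  Since my_func can now be reached via an indirect
 * call, it must start with ENDBR.
 */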

static int validate_ibt_data_reloc(struct objtool_file *file,
				   struct reloc *reloc)
{
	struct instruction *dest;

	dest = find_insn(file, reloc->sym->sec,
			 reloc->sym->offset + reloc_addend(reloc));
	if (!dest)
		return 0;

	if (dest->type == INSN_ENDBR) {
		mark_endbr_used(dest);
		return 0;
	}

	if (dest->noendbr)
		return 0;

	WARN_FUNC(reloc->sec->base, reloc_offset(reloc),
		  "data relocation to !ENDBR: %s", offstr(dest->sec, dest->offset));

	return 1;
}
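
/*
 * The data relocations checked here typically come from statically
 * initialized function pointers, e.g. (placeholder names):
 *
 *	static const struct my_ops ops = {
 *		.handler = my_handler,		<- data reloc to my_handler
 *	};
 *
 * my_handler is indirectly callable through ops.handler and must therefore
 * start with ENDBR.
 */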

/*
 * Validate IBT rules and remove used ENDBR instructions from the seal list.
 * Unused ENDBR instructions will be annotated for sealing (i.e., replaced with
 * NOPs) later, in create_ibt_endbr_seal_sections().
 */
static int validate_ibt(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;
	int warnings = 0;

	for_each_insn(file, insn)
		warnings += validate_ibt_insn(file, insn);

	for_each_sec(file, sec) {

		/* Already done by validate_ibt_insn() */
		if (sec->sh.sh_flags & SHF_EXECINSTR)
			continue;

		if (!sec->rsec)
			continue;

		/*
		 * These sections can reference text addresses, but not with
		 * the intent to indirect branch to them.
		 */
		if ((!strncmp(sec->name, ".discard", 8) &&
		     strcmp(sec->name, ".discard.ibt_endbr_noseal")) ||
		    !strncmp(sec->name, ".debug", 6) ||
		    !strcmp(sec->name, ".altinstructions") ||
		    !strcmp(sec->name, ".ibt_endbr_seal") ||
		    !strcmp(sec->name, ".orc_unwind_ip") ||
		    !strcmp(sec->name, ".parainstructions") ||
		    !strcmp(sec->name, ".retpoline_sites") ||
		    !strcmp(sec->name, ".smp_locks") ||
		    !strcmp(sec->name, ".static_call_sites") ||
		    !strcmp(sec->name, "_error_injection_whitelist") ||
		    !strcmp(sec->name, "_kprobe_blacklist") ||
		    !strcmp(sec->name, "__bug_table") ||
		    !strcmp(sec->name, "__ex_table") ||
		    !strcmp(sec->name, "__jump_table") ||
		    !strcmp(sec->name, "__mcount_loc") ||
		    !strcmp(sec->name, ".kcfi_traps") ||
		    !strcmp(sec->name, ".llvm.call-graph-profile") ||
		    !strcmp(sec->name, ".llvm_bb_addr_map") ||
		    !strcmp(sec->name, "__tracepoints") ||
		    strstr(sec->name, "__patchable_function_entries"))
			continue;

		for_each_reloc(sec->rsec, reloc)
			warnings += validate_ibt_data_reloc(file, reloc);
	}

	return warnings;
}
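
/*
 * Net effect, informally (placeholder name): an ENDBR that stays on the
 * seal list because nothing ever takes its function's address,
 *
 *	my_local_func:
 *		endbr64			<- never indirectly referenced
 *
 * is recorded in .ibt_endbr_seal and can later be overwritten with a NOP,
 * shrinking the set of valid indirect-branch targets.
 */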

static int validate_sls(struct objtool_file *file)
{
	struct instruction *insn, *next_insn;
	int warnings = 0;

	for_each_insn(file, insn) {
		next_insn = next_insn_same_sec(file, insn);

		if (insn->retpoline_safe)
			continue;

		switch (insn->type) {
		case INSN_RETURN:
			if (!next_insn || next_insn->type != INSN_TRAP) {
				WARN_INSN(insn, "missing int3 after ret");
				warnings++;
			}

			break;
		case INSN_JUMP_DYNAMIC:
			if (!next_insn || next_insn->type != INSN_TRAP) {
				WARN_INSN(insn, "missing int3 after indirect jump");
				warnings++;
			}
			break;
		default:
			break;
		}
	}

	return warnings;
}
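
/*
 * Concretely, this checks for the straight-line-speculation hardening
 * pattern (e.g. from compiling with -mharden-sls), where every ret and
 * indirect jump is followed by a speculation trap:
 *
 *	ret
 *	int3		<- INSN_TRAP; speculation cannot usefully continue
 */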

static int validate_reachable_instructions(struct objtool_file *file)
{
	struct instruction *insn, *prev_insn;
	struct symbol *call_dest;
	int warnings = 0;

	if (file->ignore_unreachables)
		return 0;

	for_each_insn(file, insn) {
		if (insn->visited || ignore_unreachable_insn(file, insn))
			continue;

		prev_insn = prev_insn_same_sec(file, insn);
		if (prev_insn && prev_insn->dead_end) {
			call_dest = insn_call_dest(prev_insn);
			if (call_dest) {
				WARN_INSN(insn, "%s() missing __noreturn in .c/.h or NORETURN() in noreturns.h",
					  call_dest->name);
				warnings++;
				continue;
			}
		}

		WARN_INSN(insn, "unreachable instruction");
		warnings++;
	}

	return warnings;
}
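
/*
 * The usual fix for the first warning above is to annotate the callee so
 * objtool knows control never returns past the call site, e.g. (placeholder
 * name):
 *
 *	void __noreturn my_fatal_helper(const char *msg);
 */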

/* 'funcs' is a space-separated list of function names */
static void disas_funcs(const char *funcs)
{
	const char *objdump_str, *cross_compile;
	int size, ret;
	char *cmd;

	cross_compile = getenv("CROSS_COMPILE");
	if (!cross_compile)
		cross_compile = "";

	objdump_str = "%sobjdump -wdr %s | gawk -M -v _funcs='%s' '"
			"BEGIN { split(_funcs, funcs); }"
			"/^$/ { func_match = 0; }"
			"/<.*>:/ { "
				"f = gensub(/.*<(.*)>:/, \"\\\\1\", 1);"
				"for (i in funcs) {"
					"if (funcs[i] == f) {"
						"func_match = 1;"
						"base = strtonum(\"0x\" $1);"
						"break;"
					"}"
				"}"
			"}"
			"{"
				"if (func_match) {"
					"addr = strtonum(\"0x\" $1);"
					"printf(\"%%04x \", addr - base);"
					"print;"
				"}"
			"}' 1>&2";

	/* fake snprintf() to calculate the size */
	size = snprintf(NULL, 0, objdump_str, cross_compile, objname, funcs) + 1;
	if (size <= 0) {
		WARN("objdump string size calculation failed");
		return;
	}

	cmd = malloc(size);
	if (!cmd) {
		ERROR_GLIBC("malloc");
		return;
	}

	/* real snprintf() */
	snprintf(cmd, size, objdump_str, cross_compile, objname, funcs);
	ret = system(cmd);
	free(cmd);
	if (ret) {
		WARN("disassembly failed: %d", ret);
		return;
	}
}
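
/*
 * For a sense of what the template expands to, with CROSS_COMPILE unset and
 * a hypothetical object name, the executed command resembles:
 *
 *	objdump -wdr vmlinux.o | gawk -M -v _funcs='foo bar' '...' 1>&2
 *
 * i.e. disassemble the object and let gawk print only the lines belonging
 * to the listed functions, each prefixed with its offset from the function
 * start.
 */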

static void disas_warned_funcs(struct objtool_file *file)
{
	struct symbol *sym;
	char *funcs = NULL, *tmp;

	for_each_sym(file, sym) {
		if (sym->warned) {
			if (!funcs) {
				funcs = malloc(strlen(sym->name) + 1);
				if (!funcs) {
					ERROR_GLIBC("malloc");
					return;
				}
				strcpy(funcs, sym->name);
			} else {
				tmp = malloc(strlen(funcs) + strlen(sym->name) + 2);
				if (!tmp) {
					ERROR_GLIBC("malloc");
					return;
				}
				sprintf(tmp, "%s %s", funcs, sym->name);
				free(funcs);
				funcs = tmp;
			}
		}
	}

	if (funcs)
		disas_funcs(funcs);
}

struct insn_chunk {
	void *addr;
	struct insn_chunk *next;
};

/*
 * Reduce peak RSS usage by freeing insns memory before writing the ELF file,
 * which can trigger more allocations for .debug_* sections whose data hasn't
 * been read yet.
 */
static void free_insns(struct objtool_file *file)
{
	struct instruction *insn;
	struct insn_chunk *chunks = NULL, *chunk;

	for_each_insn(file, insn) {
		/* idx == 0 marks the first insn of a malloc()ed chunk */
		if (!insn->idx) {
			chunk = malloc(sizeof(*chunk));
			if (!chunk) {
				ERROR_GLIBC("malloc");
				return;
			}
			chunk->addr = insn;
			chunk->next = chunks;
			chunks = chunk;
		}
	}

	for (chunk = chunks; chunk; chunk = chunk->next)
		free(chunk->addr);
}

int check(struct objtool_file *file)
{
	int ret = 0, warnings = 0;

	arch_initial_func_cfi_state(&initial_func_cfi);
	init_cfi_state(&init_cfi);
	init_cfi_state(&func_cfi);
	set_func_state(&func_cfi);
	init_cfi_state(&force_undefined_cfi);
	force_undefined_cfi.force_undefined = true;

	if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3))) {
		ret = -1;
		goto out;
	}

	cfi_hash_add(&init_cfi);
	cfi_hash_add(&func_cfi);

	ret = decode_sections(file);
	if (ret)
		goto out;

	if (!nr_insns)
		goto out;

	if (opts.retpoline)
		warnings += validate_retpoline(file);

	if (opts.stackval || opts.orc || opts.uaccess) {
		int w = 0;

		w += validate_functions(file);
		w += validate_unwind_hints(file, NULL);
		if (!w)
			w += validate_reachable_instructions(file);

		warnings += w;

	} else if (opts.noinstr) {
		warnings += validate_noinstr_sections(file);
	}

	if (opts.unret) {
		/*
		 * Must be after validate_branch() and friends, it plays
		 * further games with insn->visited.
		 */
		warnings += validate_unrets(file);
	}

	if (opts.ibt)
		warnings += validate_ibt(file);

	if (opts.sls)
		warnings += validate_sls(file);

	if (opts.static_call) {
		ret = create_static_call_sections(file);
		if (ret)
			goto out;
	}

	if (opts.retpoline) {
		ret = create_retpoline_sites_sections(file);
		if (ret)
			goto out;
	}

	if (opts.cfi) {
		ret = create_cfi_sections(file);
		if (ret)
			goto out;
	}

	if (opts.rethunk) {
		ret = create_return_sites_sections(file);
		if (ret)
			goto out;

		if (opts.hack_skylake) {
			ret = create_direct_call_sections(file);
			if (ret)
				goto out;
		}
	}

	if (opts.mcount) {
		ret = create_mcount_loc_sections(file);
		if (ret)
			goto out;
	}

	if (opts.prefix) {
		ret = add_prefix_symbols(file);
		if (ret)
			goto out;
	}

	if (opts.ibt) {
		ret = create_ibt_endbr_seal_sections(file);
		if (ret)
			goto out;
	}

	if (opts.orc && nr_insns) {
		ret = orc_create(file);
		if (ret)
			goto out;
	}

	free_insns(file);

	if (opts.stats) {
		printf("nr_insns_visited: %ld\n", nr_insns_visited);
		printf("nr_cfi: %ld\n", nr_cfi);
		printf("nr_cfi_reused: %ld\n", nr_cfi_reused);
		printf("nr_cfi_cache: %ld\n", nr_cfi_cache);
	}

out:
	if (!ret && !warnings)
		return 0;

	if (opts.werror && warnings)
		ret = 1;

	if (opts.verbose) {
		if (opts.werror && warnings)
			WARN("%d warning(s) upgraded to errors", warnings);
		print_args();
		disas_warned_funcs(file);
	}

	return ret;
}