Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/tools/objtool/check.c
49041 views
1
// SPDX-License-Identifier: GPL-2.0-or-later
2
/*
3
* Copyright (C) 2015-2017 Josh Poimboeuf <[email protected]>
4
*/
5
6
#define _GNU_SOURCE /* memmem() */
7
#include <fnmatch.h>
8
#include <string.h>
9
#include <stdlib.h>
10
#include <inttypes.h>
11
#include <sys/mman.h>
12
13
#include <objtool/builtin.h>
14
#include <objtool/cfi.h>
15
#include <objtool/arch.h>
16
#include <objtool/disas.h>
17
#include <objtool/check.h>
18
#include <objtool/special.h>
19
#include <objtool/trace.h>
20
#include <objtool/warn.h>
21
#include <objtool/checksum.h>
22
#include <objtool/util.h>
23
24
#include <linux/objtool_types.h>
25
#include <linux/hashtable.h>
26
#include <linux/kernel.h>
27
#include <linux/static_call_types.h>
28
#include <linux/string.h>
29
30
/*
 * cfi_state allocation counters: nr_cfi counts allocations (cfi_alloc),
 * nr_cfi_cache counts hash-cache hits (cfi_hash_find_or_add);
 * nr_cfi_reused is not updated in this chunk -- presumably elsewhere.
 */
static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;

/* Arch-provided CFI state at function entry. */
static struct cfi_init_state initial_func_cfi;
/* Shared template CFI states, deduplicated through the cfi hash. */
static struct cfi_state init_cfi;
static struct cfi_state func_cfi;
static struct cfi_state force_undefined_cfi;

/* Disassembly context for diagnostic output (see <objtool/disas.h>). */
struct disas_context *objtool_disas_ctx;

/* Longest symbol name seen; presumably used to align output -- TODO confirm. */
size_t sym_name_max_len;
40
41
struct instruction *find_insn(struct objtool_file *file,
42
struct section *sec, unsigned long offset)
43
{
44
struct instruction *insn;
45
46
hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
47
if (insn->sec == sec && insn->offset == offset)
48
return insn;
49
}
50
51
return NULL;
52
}
53
54
/*
 * Return the instruction immediately following @insn in its section, or
 * NULL at the end of the section.
 *
 * Instructions are stored in fixed-size chunks: within a chunk the next
 * instruction is the adjacent array slot; at the end of a chunk
 * (idx == INSN_CHUNK_MAX) the successor must be found by offset.
 */
struct instruction *next_insn_same_sec(struct objtool_file *file,
				       struct instruction *insn)
{
	if (insn->idx == INSN_CHUNK_MAX)
		return find_insn(file, insn->sec, insn->offset + insn->len);

	insn++;
	/* A zero-length slot marks the end of the decoded instructions. */
	if (!insn->len)
		return NULL;

	return insn;
}
66
67
/*
 * Return the next instruction belonging to the same function as @insn,
 * following the parent function into its cold subfunction (func->cfunc)
 * once the parent's own instructions run out.  NULL when the function
 * (including any subfunction) is exhausted or @insn has no function.
 */
static struct instruction *next_insn_same_func(struct objtool_file *file,
					       struct instruction *insn)
{
	struct instruction *next = next_insn_same_sec(file, insn);
	struct symbol *func = insn_func(insn);

	if (!func)
		return NULL;

	if (next && insn_func(next) == func)
		return next;

	/* Check if we're already in the subfunction: */
	if (func == func->cfunc)
		return NULL;

	/* Move to the subfunction: */
	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
}
86
87
/*
 * Return the instruction preceding @insn in its section, or NULL at the
 * start of the section.
 *
 * Within a chunk the predecessor is the previous array slot; at a chunk
 * boundary (idx == 0) it is located by offset via the recorded length of
 * the previous instruction (prev_len == 0 means there is none).
 */
static struct instruction *prev_insn_same_sec(struct objtool_file *file,
					      struct instruction *insn)
{
	if (insn->idx == 0) {
		if (insn->prev_len)
			return find_insn(file, insn->sec, insn->offset - insn->prev_len);
		return NULL;
	}

	return insn - 1;
}
98
99
static struct instruction *prev_insn_same_sym(struct objtool_file *file,
100
struct instruction *insn)
101
{
102
struct instruction *prev = prev_insn_same_sec(file, insn);
103
104
if (prev && insn_func(prev) == insn_func(insn))
105
return prev;
106
107
return NULL;
108
}
109
110
/* Iterate over every decoded instruction in every section of the file. */
#define for_each_insn(file, insn)					\
	for (struct section *__sec, *__fake = (struct section *)1;	\
	     __fake; __fake = NULL)					\
		for_each_sec(file->elf, __sec)				\
			sec_for_each_insn(file, __sec, insn)

/* Iterate over @func's instructions, following into its cold subfunction. */
#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))

/* Iterate over the instructions inside @sym's [offset, offset+len) range. */
#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && insn->offset < sym->offset + sym->len;		\
	     insn = next_insn_same_sec(file, insn))

/* Walk backwards from (just before) @insn while still inside @sym. */
#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = prev_insn_same_sec(file, insn);			\
	     insn && insn->offset >= sym->offset;			\
	     insn = prev_insn_same_sec(file, insn))

/* Continue a section walk starting at @insn itself. */
#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

/* Continue a section walk starting after @insn. */
#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))
137
138
static inline struct reloc *insn_jump_table(struct instruction *insn)
139
{
140
if (insn->type == INSN_JUMP_DYNAMIC ||
141
insn->type == INSN_CALL_DYNAMIC)
142
return insn->_jump_table;
143
144
return NULL;
145
}
146
147
static inline unsigned long insn_jump_table_size(struct instruction *insn)
148
{
149
if (insn->type == INSN_JUMP_DYNAMIC ||
150
insn->type == INSN_CALL_DYNAMIC)
151
return insn->_jump_table_size;
152
153
return 0;
154
}
155
156
/*
 * Does @insn branch through a jump table -- either directly, or as the
 * alternative replacement of an instruction that does?
 */
static bool is_jump_table_jump(struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	if (insn_jump_table(insn))
		return true;

	/* Retpoline alternative for a jump table? */
	return alt_group && alt_group->orig_group &&
	       insn_jump_table(alt_group->orig_group->first_insn);
}
167
168
/*
 * Is @insn a sibling (tail) call?  A dynamic jump inside a function is a
 * sibling call unless it targets a jump table; a static jump is a sibling
 * call when add_jump_destinations() has given it a call destination.
 */
static bool is_sibling_call(struct instruction *insn)
{
	/*
	 * Assume only STT_FUNC calls have jump-tables.
	 */
	if (insn_func(insn)) {
		/* An indirect jump is either a sibling call or a jump to a table. */
		if (insn->type == INSN_JUMP_DYNAMIC)
			return !is_jump_table_jump(insn);
	}

	/* add_jump_destinations() sets insn_call_dest(insn) for sibling calls. */
	return (is_static_jump(insn) && insn_call_dest(insn));
}
182
183
/*
 * Checks if a function is a Rust "noreturn" one.
 *
 * The substrings below must track the (mangled) names of the Rust standard
 * library's panic/abort entry points; they are matched against the full
 * mangled symbol name.
 */
static bool is_rust_noreturn(const struct symbol *func)
{
	/*
	 * If it does not start with "_R", then it is not a Rust symbol.
	 */
	if (strncmp(func->name, "_R", 2))
		return false;

	/*
	 * These are just heuristics -- we do not control the precise symbol
	 * name, due to the crate disambiguators (which depend on the compiler)
	 * as well as changes to the source code itself between versions (since
	 * these come from the Rust standard library).
	 */
	return str_ends_with(func->name, "_4core3num22from_ascii_radix_panic")			||
	       str_ends_with(func->name, "_4core5sliceSp15copy_from_slice17len_mismatch_fail")	||
	       str_ends_with(func->name, "_4core6option13expect_failed")			||
	       str_ends_with(func->name, "_4core6option13unwrap_failed")			||
	       str_ends_with(func->name, "_4core6result13unwrap_failed")			||
	       str_ends_with(func->name, "_4core9panicking5panic")				||
	       str_ends_with(func->name, "_4core9panicking9panic_fmt")				||
	       str_ends_with(func->name, "_4core9panicking14panic_explicit")			||
	       str_ends_with(func->name, "_4core9panicking14panic_nounwind")			||
	       str_ends_with(func->name, "_4core9panicking18panic_bounds_check")		||
	       str_ends_with(func->name, "_4core9panicking18panic_nounwind_fmt")		||
	       str_ends_with(func->name, "_4core9panicking19assert_failed_inner")		||
	       str_ends_with(func->name, "_4core9panicking30panic_null_pointer_dereference")	||
	       str_ends_with(func->name, "_4core9panicking36panic_misaligned_pointer_dereference") ||
	       str_ends_with(func->name, "_7___rustc17rust_begin_unwind")			||
	       strstr(func->name, "_4core9panicking13assert_failed")				||
	       strstr(func->name, "_4core9panicking11panic_const24panic_const_")		||
	       (strstr(func->name, "_4core5slice5index") &&
		strstr(func->name, "slice_") &&
		str_ends_with(func->name, "_fail"));
}
221
222
/*
 * This checks to see if the given function is a "noreturn" function.
 *
 * For global functions which are outside the scope of this object file, we
 * have to keep a manual list of them.
 *
 * For local functions, we have to detect them manually by simply looking for
 * the lack of a return instruction.
 *
 * @recursion bounds the depth when following chains of sibling calls.
 */
static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
				int recursion)
{
	int i;
	struct instruction *insn;
	bool empty = true;

	/* Manual list of known-noreturn global functions, see noreturns.h. */
#define NORETURN(func) __stringify(func),
	static const char * const global_noreturns[] = {
#include "noreturns.h"
	};
#undef NORETURN

	if (!func)
		return false;

	if (!is_local_sym(func)) {
		if (is_rust_noreturn(func))
			return true;

		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
			if (!strcmp(func->name, global_noreturns[i]))
				return true;
	}

	/* A weak symbol can be overridden by a returning implementation. */
	if (is_weak_sym(func))
		return false;

	if (!func->len)
		return false;

	insn = find_insn(file, func->sec, func->offset);
	if (!insn || !insn_func(insn))
		return false;

	/* Any reachable return instruction means the function can return. */
	func_for_each_insn(file, func, insn) {
		empty = false;

		if (insn->type == INSN_RETURN)
			return false;
	}

	if (empty)
		return false;

	/*
	 * A function can have a sibling call instead of a return.  In that
	 * case, the function's dead-end status depends on whether the target
	 * of the sibling call returns.
	 */
	func_for_each_insn(file, func, insn) {
		if (is_sibling_call(insn)) {
			struct instruction *dest = insn->jump_dest;

			if (!dest)
				/* sibling call to another file */
				return false;

			/* local sibling call */
			if (recursion == 5) {
				/*
				 * Infinite recursion: two functions have
				 * sibling calls to each other.  This is a very
				 * rare case.  It means they aren't dead ends.
				 */
				return false;
			}

			return __dead_end_function(file, insn_func(dest), recursion+1);
		}
	}

	return true;
}
305
306
/* Public entry point: is @func a "noreturn" (dead-end) function? */
static bool dead_end_function(struct objtool_file *file, struct symbol *func)
{
	return __dead_end_function(file, func, 0);
}
310
311
static void init_cfi_state(struct cfi_state *cfi)
312
{
313
int i;
314
315
for (i = 0; i < CFI_NUM_REGS; i++) {
316
cfi->regs[i].base = CFI_UNDEFINED;
317
cfi->vals[i].base = CFI_UNDEFINED;
318
}
319
cfi->cfa.base = CFI_UNDEFINED;
320
cfi->drap_reg = CFI_UNDEFINED;
321
cfi->drap_offset = -1;
322
}
323
324
/*
 * Zero-initialize the per-branch validation state and its embedded CFI
 * state.  When noinstr validation is enabled, inherit the noinstr flag
 * from the section being validated (if any).
 */
static void init_insn_state(struct objtool_file *file, struct insn_state *state,
			    struct section *sec)
{
	memset(state, 0, sizeof(*state));
	init_cfi_state(&state->cfi);

	if (opts.noinstr && sec)
		state->noinstr = sec->noinstr;
}
333
334
/*
 * Allocate a zeroed cfi_state.  Allocation failure is treated as fatal:
 * the tool exits immediately rather than propagating the error.
 */
static struct cfi_state *cfi_alloc(void)
{
	struct cfi_state *cfi = calloc(1, sizeof(struct cfi_state));
	if (!cfi) {
		ERROR_GLIBC("calloc");
		exit(1);
	}
	/* Statistics counter, see nr_cfi above. */
	nr_cfi++;
	return cfi;
}
344
345
/*
 * Hash table of deduplicated cfi_state objects (see cfi_hash_find_or_add);
 * cfi_bits is the table's size in log2 buckets, set by cfi_hash_alloc().
 */
static int cfi_bits;
static struct hlist_head *cfi_hash;
347
348
/*
 * Compare two cfi_state objects, skipping the embedded hash node.
 * Returns nonzero when they differ (memcmp semantics).
 *
 * NOTE(review): the skip offset uses sizeof(cfi1->hash) while the length
 * uses sizeof(struct hlist_node) -- this assumes ->hash is a leading
 * struct hlist_node member of struct cfi_state; verify against cfi.h.
 */
static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2)
{
	return memcmp((void *)cfi1 + sizeof(cfi1->hash),
		      (void *)cfi2 + sizeof(cfi2->hash),
		      sizeof(struct cfi_state) - sizeof(struct hlist_node));
}
354
355
/*
 * Hash everything in @cfi except the embedded hash node itself, so that
 * identical CFI states hash (and compare, see cficmp()) equal.
 */
static inline u32 cfi_key(struct cfi_state *cfi)
{
	return jhash((void *)cfi + sizeof(cfi->hash),
		     sizeof(*cfi) - sizeof(cfi->hash), 0);
}
360
361
/*
 * Deduplicate @cfi: return the canonical, hashed copy of an identical
 * CFI state if one exists, otherwise allocate a copy, insert it into the
 * hash and return that.  The caller's @cfi itself is never inserted.
 */
static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi)
{
	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
	struct cfi_state *obj;

	hlist_for_each_entry(obj, head, hash) {
		if (!cficmp(cfi, obj)) {
			/* cache-hit statistics */
			nr_cfi_cache++;
			return obj;
		}
	}

	obj = cfi_alloc();
	*obj = *cfi;
	hlist_add_head(&obj->hash, head);

	return obj;
}
379
380
static void cfi_hash_add(struct cfi_state *cfi)
381
{
382
struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
383
384
hlist_add_head(&cfi->hash, head);
385
}
386
387
static void *cfi_hash_alloc(unsigned long size)
388
{
389
cfi_bits = max(10, ilog2(size));
390
cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits,
391
PROT_READ|PROT_WRITE,
392
MAP_PRIVATE|MAP_ANON, -1, 0);
393
if (cfi_hash == (void *)-1L) {
394
ERROR_GLIBC("mmap fail cfi_hash");
395
cfi_hash = NULL;
396
} else if (opts.stats) {
397
printf("cfi_bits: %d\n", cfi_bits);
398
}
399
400
return cfi_hash;
401
}
402
403
/* Decoded-instruction counters; nr_insns is printed with --stats. */
static unsigned long nr_insns;
static unsigned long nr_insns_visited;
405
406
/*
 * Call the arch-specific instruction decoder for all the instructions and add
 * them to the global instruction list.
 *
 * Also classifies sections (text / noinstr / init) by name, and attaches
 * each instruction to its containing STT_FUNC/STT_NOTYPE symbol.
 * Returns 0 on success, -1 on error.
 */
static int decode_instructions(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	unsigned long offset;
	struct instruction *insn;

	for_each_sec(file->elf, sec) {
		/* Instructions are allocated in chunks of INSN_CHUNK_SIZE. */
		struct instruction *insns = NULL;
		u8 prev_len = 0;
		u8 idx = 0;

		if (!is_text_sec(sec))
			continue;

		/* Alternative-replacement and .discard sections aren't "real" text. */
		if (strcmp(sec->name, ".altinstr_replacement") &&
		    strcmp(sec->name, ".altinstr_aux") &&
		    strncmp(sec->name, ".discard.", 9))
			sec->text = true;

		if (!strcmp(sec->name, ".noinstr.text") ||
		    !strcmp(sec->name, ".entry.text") ||
		    !strcmp(sec->name, ".cpuidle.text") ||
		    !strncmp(sec->name, ".text..__x86.", 13))
			sec->noinstr = true;

		/*
		 * .init.text code is ran before userspace and thus doesn't
		 * strictly need retpolines, except for modules which are
		 * loaded late, they very much do need retpoline in their
		 * .init.text
		 */
		if (!strcmp(sec->name, ".init.text") && !opts.module)
			sec->init = true;

		for (offset = 0; offset < sec_size(sec); offset += insn->len) {
			/* Start a new chunk when needed; slots are zeroed by calloc. */
			if (!insns || idx == INSN_CHUNK_MAX) {
				insns = calloc(INSN_CHUNK_SIZE, sizeof(*insn));
				if (!insns) {
					ERROR_GLIBC("calloc");
					return -1;
				}
				idx = 0;
			} else {
				idx++;
			}
			insn = &insns[idx];
			insn->idx = idx;

			INIT_LIST_HEAD(&insn->call_node);
			insn->sec = sec;
			insn->offset = offset;
			/* Needed by prev_insn_same_sec() at chunk boundaries. */
			insn->prev_len = prev_len;

			if (arch_decode_instruction(file, sec, offset, sec_size(sec) - offset, insn))
				return -1;

			prev_len = insn->len;

			/*
			 * By default, "ud2" is a dead end unless otherwise
			 * annotated, because GCC 7 inserts it for certain
			 * divide-by-zero cases.
			 */
			if (insn->type == INSN_BUG)
				insn->dead_end = true;

			hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
			nr_insns++;
		}

		/* Attach instructions to their containing symbols. */
		sec_for_each_sym(sec, func) {
			if (!is_notype_sym(func) && !is_func_sym(func))
				continue;

			if (func->offset == sec_size(sec)) {
				/* Heuristic: likely an "end" symbol */
				if (is_notype_sym(func))
					continue;
				ERROR("%s(): STT_FUNC at end of section", func->name);
				return -1;
			}

			/* Only process canonical, non-embedded symbols. */
			if (func->embedded_insn || func->alias != func)
				continue;

			if (!find_insn(file, sec, func->offset)) {
				ERROR("%s(): can't find starting instruction", func->name);
				return -1;
			}

			sym_for_each_insn(file, func, insn) {
				insn->sym = func;
				/*
				 * Track ENDBR instructions: those at a function's
				 * entry are candidates for IBT sealing.
				 */
				if (is_func_sym(func) &&
				    insn->type == INSN_ENDBR &&
				    list_empty(&insn->call_node)) {
					if (insn->offset == func->offset) {
						list_add_tail(&insn->call_node, &file->endbr_list);
						file->nr_endbr++;
					} else {
						file->nr_endbr_int++;
					}
				}
			}
		}
	}

	if (opts.stats)
		printf("nr_insns: %lu\n", nr_insns);

	return 0;
}
522
523
/*
 * Read the pv_ops[] .data table to find the static initialized values.
 *
 * Walks the relocations covering @symname's data and records, for every
 * slot, which function it is statically initialized to point at.
 * Returns 0 on success (including when @symname doesn't exist), -1 on error.
 */
static int add_pv_ops(struct objtool_file *file, const char *symname)
{
	struct symbol *sym, *func;
	unsigned long off, end;
	struct reloc *reloc;
	int idx;

	sym = find_symbol_by_name(file->elf, symname);
	if (!sym)
		return 0;

	off = sym->offset;
	end = off + sym->len;
	for (;;) {
		reloc = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off);
		if (!reloc)
			break;

		/* Slot index: each entry is pointer-sized. */
		idx = (reloc_offset(reloc) - sym->offset) / sizeof(unsigned long);

		func = reloc->sym;
		/* Section-relative reloc: resolve to the symbol at the addend. */
		if (is_sec_sym(func))
			func = find_symbol_by_offset(reloc->sym->sec,
						     reloc_addend(reloc));
		if (!func) {
			ERROR_FUNC(reloc->sym->sec, reloc_addend(reloc),
				   "can't find func at %s[%d]", symname, idx);
			return -1;
		}

		if (objtool_pv_add(file, idx, func))
			return -1;

		/* Continue searching just past this reloc. */
		off = reloc_offset(reloc) + 1;
		if (off > end)
			break;
	}

	return 0;
}
566
567
/*
 * Allocate and initialize file->pv_ops[].
 *
 * Only needed for noinstr validation.  The array is sized by the "pv_ops"
 * symbol, then populated from each of the known paravirt op tables.
 * Returns 0 on success (including when paravirt isn't present), -1 on error.
 */
static int init_pv_ops(struct objtool_file *file)
{
	static const char *pv_ops_tables[] = {
		"pv_ops",
		"xen_cpu_ops",
		"xen_irq_ops",
		"xen_mmu_ops",
		NULL,
	};
	const char *pv_ops;
	struct symbol *sym;
	int idx, nr;

	if (!opts.noinstr)
		return 0;

	file->pv_ops = NULL;

	sym = find_symbol_by_name(file->elf, "pv_ops");
	if (!sym)
		return 0;

	/* One pv_state per pointer-sized slot of pv_ops. */
	nr = sym->len / sizeof(unsigned long);
	file->pv_ops = calloc(nr, sizeof(struct pv_state));
	if (!file->pv_ops) {
		ERROR_GLIBC("calloc");
		return -1;
	}

	for (idx = 0; idx < nr; idx++)
		INIT_LIST_HEAD(&file->pv_ops[idx].targets);

	for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++) {
		if (add_pv_ops(file, pv_ops))
			return -1;
	}

	return 0;
}
609
610
static bool is_livepatch_module(struct objtool_file *file)
611
{
612
struct section *sec;
613
614
if (!opts.module)
615
return false;
616
617
sec = find_section_by_name(file->elf, ".modinfo");
618
if (!sec)
619
return false;
620
621
return memmem(sec->data->d_buf, sec_size(sec), "\0livepatch=Y", 12);
622
}
623
624
static int create_static_call_sections(struct objtool_file *file)
625
{
626
struct static_call_site *site;
627
struct section *sec;
628
struct instruction *insn;
629
struct symbol *key_sym;
630
char *key_name, *tmp;
631
int idx;
632
633
sec = find_section_by_name(file->elf, ".static_call_sites");
634
if (sec) {
635
/*
636
* Livepatch modules may have already extracted the static call
637
* site entries to take advantage of vmlinux static call
638
* privileges.
639
*/
640
if (!file->klp)
641
WARN("file already has .static_call_sites section, skipping");
642
643
return 0;
644
}
645
646
if (list_empty(&file->static_call_list))
647
return 0;
648
649
idx = 0;
650
list_for_each_entry(insn, &file->static_call_list, call_node)
651
idx++;
652
653
sec = elf_create_section_pair(file->elf, ".static_call_sites",
654
sizeof(*site), idx, idx * 2);
655
if (!sec)
656
return -1;
657
658
/* Allow modules to modify the low bits of static_call_site::key */
659
sec->sh.sh_flags |= SHF_WRITE;
660
661
idx = 0;
662
list_for_each_entry(insn, &file->static_call_list, call_node) {
663
664
/* populate reloc for 'addr' */
665
if (!elf_init_reloc_text_sym(file->elf, sec,
666
idx * sizeof(*site), idx * 2,
667
insn->sec, insn->offset))
668
return -1;
669
670
/* find key symbol */
671
key_name = strdup(insn_call_dest(insn)->name);
672
if (!key_name) {
673
ERROR_GLIBC("strdup");
674
return -1;
675
}
676
if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
677
STATIC_CALL_TRAMP_PREFIX_LEN)) {
678
ERROR("static_call: trampoline name malformed: %s", key_name);
679
return -1;
680
}
681
tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
682
memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);
683
684
key_sym = find_symbol_by_name(file->elf, tmp);
685
if (!key_sym) {
686
if (!opts.module) {
687
ERROR("static_call: can't find static_call_key symbol: %s", tmp);
688
return -1;
689
}
690
691
/*
692
* For modules(), the key might not be exported, which
693
* means the module can make static calls but isn't
694
* allowed to change them.
695
*
696
* In that case we temporarily set the key to be the
697
* trampoline address. This is fixed up in
698
* static_call_add_module().
699
*/
700
key_sym = insn_call_dest(insn);
701
}
702
703
/* populate reloc for 'key' */
704
if (!elf_init_reloc_data_sym(file->elf, sec,
705
idx * sizeof(*site) + 4,
706
(idx * 2) + 1, key_sym,
707
is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
708
return -1;
709
710
idx++;
711
}
712
713
return 0;
714
}
715
716
/*
 * Generate the .retpoline_sites section: one PC-relative entry per
 * retpoline call collected in file->retpoline_call_list.
 * Returns 0 on success or nothing-to-do, -1 on error.
 */
static int create_retpoline_sites_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".retpoline_sites");
	if (sec) {
		WARN("file already has .retpoline_sites, skipping");
		return 0;
	}

	/* Count the entries to size the section. */
	idx = 0;
	list_for_each_entry(insn, &file->retpoline_call_list, call_node)
		idx++;

	if (!idx)
		return 0;

	sec = elf_create_section_pair(file->elf, ".retpoline_sites",
				      sizeof(int), idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->retpoline_call_list, call_node) {

		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(int), idx,
					     insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}
753
754
/*
 * Generate the .return_sites section: one entry per return-thunk call
 * collected in file->return_thunk_list.  Mirrors
 * create_retpoline_sites_sections().
 * Returns 0 on success or nothing-to-do, -1 on error.
 */
static int create_return_sites_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".return_sites");
	if (sec) {
		WARN("file already has .return_sites, skipping");
		return 0;
	}

	/* Count the entries to size the section. */
	idx = 0;
	list_for_each_entry(insn, &file->return_thunk_list, call_node)
		idx++;

	if (!idx)
		return 0;

	sec = elf_create_section_pair(file->elf, ".return_sites",
				      sizeof(int), idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->return_thunk_list, call_node) {

		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(int), idx,
					     insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}
791
792
/*
 * Generate the .ibt_endbr_seal section: one entry per superfluous ENDBR
 * instruction (collected in file->endbr_list) that the kernel may seal.
 * Returns 0 on success or nothing-to-do, -1 on error.
 */
static int create_ibt_endbr_seal_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".ibt_endbr_seal");
	if (sec) {
		WARN("file already has .ibt_endbr_seal, skipping");
		return 0;
	}

	/* Count the entries to size the section. */
	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node)
		idx++;

	if (opts.stats) {
		printf("ibt: ENDBR at function start: %d\n", file->nr_endbr);
		printf("ibt: ENDBR inside functions: %d\n", file->nr_endbr_int);
		printf("ibt: superfluous ENDBR: %d\n", idx);
	}

	if (!idx)
		return 0;

	sec = elf_create_section_pair(file->elf, ".ibt_endbr_seal",
				      sizeof(int), idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		struct symbol *sym = insn->sym;
		*site = 0;

		/* Reject the deprecated magic init_module()/cleanup_module() names. */
		if (opts.module && sym && is_func_sym(sym) &&
		    insn->offset == sym->offset &&
		    (!strcmp(sym->name, "init_module") ||
		     !strcmp(sym->name, "cleanup_module"))) {
			ERROR("%s(): Magic init_module() function name is deprecated, use module_init(fn) instead",
			      sym->name);
			return -1;
		}

		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(int), idx,
					     insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}
848
849
/*
 * Generate the .cfi_sites section: one entry per kCFI preamble symbol
 * ("__cfi_*" STT_FUNC symbols).
 * Returns 0 on success, -1 on error.
 *
 * NOTE(review): unlike the sibling create_*_sections() helpers, this one
 * creates the section even when no entries were counted -- confirm that
 * is intentional.
 */
static int create_cfi_sections(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *sym;
	int idx;

	sec = find_section_by_name(file->elf, ".cfi_sites");
	if (sec) {
		WARN("file already has .cfi_sites section, skipping");
		return 0;
	}

	/* First pass: count the __cfi_* functions to size the section. */
	idx = 0;
	for_each_sym(file->elf, sym) {
		if (!is_func_sym(sym))
			continue;

		if (strncmp(sym->name, "__cfi_", 6))
			continue;

		idx++;
	}

	sec = elf_create_section_pair(file->elf, ".cfi_sites",
				      sizeof(unsigned int), idx, idx);
	if (!sec)
		return -1;

	/* Second pass: emit one reloc per __cfi_* function. */
	idx = 0;
	for_each_sym(file->elf, sym) {
		if (!is_func_sym(sym))
			continue;

		if (strncmp(sym->name, "__cfi_", 6))
			continue;

		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(unsigned int), idx,
					     sym->sec, sym->offset))
			return -1;

		idx++;
	}

	return 0;
}
895
896
/*
 * Generate the __mcount_loc section: one absolute address per mcount call
 * site collected in file->mcount_loc_list.
 * Returns 0 on success or nothing-to-do, -1 on error.
 */
static int create_mcount_loc_sections(struct objtool_file *file)
{
	size_t addr_size = elf_addr_size(file->elf);
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, "__mcount_loc");
	if (sec) {
		/*
		 * Livepatch modules have already extracted their __mcount_loc
		 * entries to cover the !CONFIG_FTRACE_MCOUNT_USE_OBJTOOL case.
		 */
		if (!file->klp)
			WARN("file already has __mcount_loc section, skipping");

		return 0;
	}

	if (list_empty(&file->mcount_loc_list))
		return 0;

	/* Count the entries to size the section. */
	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node)
		idx++;

	sec = elf_create_section_pair(file->elf, "__mcount_loc", addr_size,
				      idx, idx);
	if (!sec)
		return -1;

	sec->sh.sh_addralign = addr_size;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node) {

		struct reloc *reloc;

		reloc = elf_init_reloc_text_sym(file->elf, sec, idx * addr_size, idx,
					       insn->sec, insn->offset);
		if (!reloc)
			return -1;

		/* Entries are full-width absolute addresses. */
		set_reloc_type(file->elf, reloc, addr_size == 8 ? R_ABS64 : R_ABS32);

		idx++;
	}

	return 0;
}
946
947
/*
 * Generate the .call_sites section: one entry per direct call collected
 * in file->call_list.
 * Returns 0 on success or nothing-to-do, -1 on error.
 */
static int create_direct_call_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".call_sites");
	if (sec) {
		WARN("file already has .call_sites section, skipping");
		return 0;
	}

	if (list_empty(&file->call_list))
		return 0;

	/* Count the entries to size the section. */
	idx = 0;
	list_for_each_entry(insn, &file->call_list, call_node)
		idx++;

	sec = elf_create_section_pair(file->elf, ".call_sites",
				      sizeof(unsigned int), idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->call_list, call_node) {

		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(unsigned int), idx,
					     insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}
984
985
#ifdef BUILD_KLP
/*
 * Generate the .discard.sym_checksum section: one {addr, checksum} record
 * per symbol that has a computed checksum, used by livepatch tooling.
 * Returns 0 on success or nothing-to-do, -1 on error.
 */
static int create_sym_checksum_section(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *sym;
	unsigned int idx = 0;
	struct sym_checksum *checksum;
	size_t entsize = sizeof(struct sym_checksum);

	sec = find_section_by_name(file->elf, ".discard.sym_checksum");
	if (sec) {
		if (!opts.dryrun)
			WARN("file already has .discard.sym_checksum section, skipping");

		return 0;
	}

	/* Count the checksummed symbols to size the section. */
	for_each_sym(file->elf, sym)
		if (sym->csum.checksum)
			idx++;

	if (!idx)
		return 0;

	sec = elf_create_section_pair(file->elf, ".discard.sym_checksum", entsize,
				      idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	for_each_sym(file->elf, sym) {
		if (!sym->csum.checksum)
			continue;

		if (!elf_init_reloc(file->elf, sec->rsec, idx, idx * entsize,
				    sym, 0, R_TEXT64))
			return -1;

		checksum = (struct sym_checksum *)sec->data->d_buf + idx;
		checksum->addr = 0; /* reloc */
		checksum->checksum = sym->csum.checksum;

		mark_sec_changed(file->elf, sec, true);

		idx++;
	}

	return 0;
}
#else
/* Without BUILD_KLP there is nothing to emit; callers get -EINVAL. */
static int create_sym_checksum_section(struct objtool_file *file) { return -EINVAL; }
#endif
1037
1038
/*
 * Warnings shouldn't be reported for ignored functions.
 *
 * Reads the .discard.func_stack_frame_non_standard relocations (emitted by
 * the STACK_FRAME_NON_STANDARD() annotation) and marks each referenced
 * function -- and its cold subfunction -- as ignored.
 */
static int add_ignores(struct objtool_file *file)
{
	struct section *rsec;
	struct symbol *func;
	struct reloc *reloc;

	rsec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
	if (!rsec)
		return 0;

	for_each_reloc(rsec, reloc) {
		switch (reloc->sym->type) {
		case STT_FUNC:
			func = reloc->sym;
			break;

		case STT_SECTION:
			/* Section-relative reloc: resolve the function by offset. */
			func = find_func_by_offset(reloc->sym->sec, reloc_addend(reloc));
			if (!func)
				continue;
			break;

		default:
			ERROR("unexpected relocation symbol type in %s: %d",
			      rsec->name, reloc->sym->type);
			return -1;
		}

		func->ignore = true;
		if (func->cfunc)
			func->cfunc->ignore = true;
	}

	return 0;
}
1076
1077
/*
 * This is a whitelist of functions that is allowed to be called with AC set.
 * The list is meant to be minimal and only contains compiler instrumentation
 * ABI and a few functions used to implement *_{to,from}_user() functions.
 *
 * These functions must not directly change AC, but may PUSHF/POPF.
 *
 * Entries are matched by exact symbol name in add_uaccess_safe().
 */
static const char *uaccess_safe_builtin[] = {
	/* KASAN */
	"kasan_report",
	"kasan_check_range",
	/* KASAN out-of-line */
	"__asan_loadN_noabort",
	"__asan_load1_noabort",
	"__asan_load2_noabort",
	"__asan_load4_noabort",
	"__asan_load8_noabort",
	"__asan_load16_noabort",
	"__asan_storeN_noabort",
	"__asan_store1_noabort",
	"__asan_store2_noabort",
	"__asan_store4_noabort",
	"__asan_store8_noabort",
	"__asan_store16_noabort",
	"__kasan_check_read",
	"__kasan_check_write",
	/* KASAN in-line */
	"__asan_report_load_n_noabort",
	"__asan_report_load1_noabort",
	"__asan_report_load2_noabort",
	"__asan_report_load4_noabort",
	"__asan_report_load8_noabort",
	"__asan_report_load16_noabort",
	"__asan_report_store_n_noabort",
	"__asan_report_store1_noabort",
	"__asan_report_store2_noabort",
	"__asan_report_store4_noabort",
	"__asan_report_store8_noabort",
	"__asan_report_store16_noabort",
	/* KCSAN */
	"__kcsan_check_access",
	"__kcsan_mb",
	"__kcsan_wmb",
	"__kcsan_rmb",
	"__kcsan_release",
	"kcsan_found_watchpoint",
	"kcsan_setup_watchpoint",
	"kcsan_check_scoped_accesses",
	"kcsan_disable_current",
	"kcsan_enable_current_nowarn",
	/* KCSAN/TSAN */
	"__tsan_func_entry",
	"__tsan_func_exit",
	"__tsan_read_range",
	"__tsan_write_range",
	"__tsan_read1",
	"__tsan_read2",
	"__tsan_read4",
	"__tsan_read8",
	"__tsan_read16",
	"__tsan_write1",
	"__tsan_write2",
	"__tsan_write4",
	"__tsan_write8",
	"__tsan_write16",
	"__tsan_read_write1",
	"__tsan_read_write2",
	"__tsan_read_write4",
	"__tsan_read_write8",
	"__tsan_read_write16",
	"__tsan_volatile_read1",
	"__tsan_volatile_read2",
	"__tsan_volatile_read4",
	"__tsan_volatile_read8",
	"__tsan_volatile_read16",
	"__tsan_volatile_write1",
	"__tsan_volatile_write2",
	"__tsan_volatile_write4",
	"__tsan_volatile_write8",
	"__tsan_volatile_write16",
	"__tsan_atomic8_load",
	"__tsan_atomic16_load",
	"__tsan_atomic32_load",
	"__tsan_atomic64_load",
	"__tsan_atomic8_store",
	"__tsan_atomic16_store",
	"__tsan_atomic32_store",
	"__tsan_atomic64_store",
	"__tsan_atomic8_exchange",
	"__tsan_atomic16_exchange",
	"__tsan_atomic32_exchange",
	"__tsan_atomic64_exchange",
	"__tsan_atomic8_fetch_add",
	"__tsan_atomic16_fetch_add",
	"__tsan_atomic32_fetch_add",
	"__tsan_atomic64_fetch_add",
	"__tsan_atomic8_fetch_sub",
	"__tsan_atomic16_fetch_sub",
	"__tsan_atomic32_fetch_sub",
	"__tsan_atomic64_fetch_sub",
	"__tsan_atomic8_fetch_and",
	"__tsan_atomic16_fetch_and",
	"__tsan_atomic32_fetch_and",
	"__tsan_atomic64_fetch_and",
	"__tsan_atomic8_fetch_or",
	"__tsan_atomic16_fetch_or",
	"__tsan_atomic32_fetch_or",
	"__tsan_atomic64_fetch_or",
	"__tsan_atomic8_fetch_xor",
	"__tsan_atomic16_fetch_xor",
	"__tsan_atomic32_fetch_xor",
	"__tsan_atomic64_fetch_xor",
	"__tsan_atomic8_fetch_nand",
	"__tsan_atomic16_fetch_nand",
	"__tsan_atomic32_fetch_nand",
	"__tsan_atomic64_fetch_nand",
	"__tsan_atomic8_compare_exchange_strong",
	"__tsan_atomic16_compare_exchange_strong",
	"__tsan_atomic32_compare_exchange_strong",
	"__tsan_atomic64_compare_exchange_strong",
	"__tsan_atomic8_compare_exchange_weak",
	"__tsan_atomic16_compare_exchange_weak",
	"__tsan_atomic32_compare_exchange_weak",
	"__tsan_atomic64_compare_exchange_weak",
	"__tsan_atomic8_compare_exchange_val",
	"__tsan_atomic16_compare_exchange_val",
	"__tsan_atomic32_compare_exchange_val",
	"__tsan_atomic64_compare_exchange_val",
	"__tsan_atomic_thread_fence",
	"__tsan_atomic_signal_fence",
	"__tsan_unaligned_read16",
	"__tsan_unaligned_write16",
	/* KCOV */
	"write_comp_data",
	"check_kcov_mode",
	"__sanitizer_cov_trace_pc",
	"__sanitizer_cov_trace_const_cmp1",
	"__sanitizer_cov_trace_const_cmp2",
	"__sanitizer_cov_trace_const_cmp4",
	"__sanitizer_cov_trace_const_cmp8",
	"__sanitizer_cov_trace_cmp1",
	"__sanitizer_cov_trace_cmp2",
	"__sanitizer_cov_trace_cmp4",
	"__sanitizer_cov_trace_cmp8",
	"__sanitizer_cov_trace_switch",
	/* KMSAN */
	"kmsan_copy_to_user",
	"kmsan_disable_current",
	"kmsan_enable_current",
	"kmsan_report",
	"kmsan_unpoison_entry_regs",
	"kmsan_unpoison_memory",
	"__msan_chain_origin",
	"__msan_get_context_state",
	"__msan_instrument_asm_store",
	"__msan_metadata_ptr_for_load_1",
	"__msan_metadata_ptr_for_load_2",
	"__msan_metadata_ptr_for_load_4",
	"__msan_metadata_ptr_for_load_8",
	"__msan_metadata_ptr_for_load_n",
	"__msan_metadata_ptr_for_store_1",
	"__msan_metadata_ptr_for_store_2",
	"__msan_metadata_ptr_for_store_4",
	"__msan_metadata_ptr_for_store_8",
	"__msan_metadata_ptr_for_store_n",
	"__msan_poison_alloca",
	"__msan_warning",
	/* UBSAN */
	"ubsan_type_mismatch_common",
	"__ubsan_handle_type_mismatch",
	"__ubsan_handle_type_mismatch_v1",
	"__ubsan_handle_shift_out_of_bounds",
	"__ubsan_handle_load_invalid_value",
	/* KSTACK_ERASE */
	"__sanitizer_cov_stack_depth",
	/* TRACE_BRANCH_PROFILING */
	"ftrace_likely_update",
	/* STACKPROTECTOR */
	"__stack_chk_fail",
	/* misc */
	"csum_partial_copy_generic",
	"copy_mc_fragile",
	"copy_mc_fragile_handle_tail",
	"copy_mc_enhanced_fast_string",
	"rep_stos_alternative",
	"rep_movs_alternative",
	"__copy_user_nocache",
	NULL
};
1266
1267
static void add_uaccess_safe(struct objtool_file *file)
1268
{
1269
struct symbol *func;
1270
const char **name;
1271
1272
if (!opts.uaccess)
1273
return;
1274
1275
for (name = uaccess_safe_builtin; *name; name++) {
1276
func = find_symbol_by_name(file->elf, *name);
1277
if (!func)
1278
continue;
1279
1280
func->uaccess_safe = true;
1281
}
1282
}
1283
1284
/*
 * Symbols that replace INSN_CALL_DYNAMIC, every (tail) call to such a symbol
 * will be added to the .retpoline_sites section.
 *
 * Weak default returning false; arch code overrides this to recognize its
 * retpoline thunk symbols.
 */
__weak bool arch_is_retpoline(struct symbol *sym)
{
	return false;
}
1292
1293
/*
 * Symbols that replace INSN_RETURN, every (tail) call to such a symbol
 * will be added to the .return_sites section.
 *
 * Weak default returning false; arch code overrides this to recognize its
 * return thunk symbols.
 */
__weak bool arch_is_rethunk(struct symbol *sym)
{
	return false;
}
1301
1302
/*
 * Symbols that are embedded inside other instructions, because sometimes crazy
 * code exists. These are mostly ignored for validation purposes.
 *
 * Weak default returning false; arch code overrides this where such symbols
 * exist.
 */
__weak bool arch_is_embedded_insn(struct symbol *sym)
{
	return false;
}
1310
1311
static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
1312
{
1313
struct reloc *reloc;
1314
1315
if (insn->no_reloc)
1316
return NULL;
1317
1318
if (!file)
1319
return NULL;
1320
1321
reloc = find_reloc_by_dest_range(file->elf, insn->sec,
1322
insn->offset, insn->len);
1323
if (!reloc) {
1324
insn->no_reloc = 1;
1325
return NULL;
1326
}
1327
1328
return reloc;
1329
}
1330
1331
static void remove_insn_ops(struct instruction *insn)
1332
{
1333
struct stack_op *op, *next;
1334
1335
for (op = insn->stack_ops; op; op = next) {
1336
next = op->next;
1337
free(op);
1338
}
1339
insn->stack_ops = NULL;
1340
}
1341
1342
/*
 * Classify a direct (sibling) call site and record it on the relevant
 * per-file list (static calls, retpoline calls, mcount locations, plain
 * calls), possibly rewriting the instruction bytes in the object file for
 * noinstr/mcount hacks.  Returns 0 on success, -1 on error.
 */
static int annotate_call_site(struct objtool_file *file,
			      struct instruction *insn, bool sibling)
{
	struct reloc *reloc = insn_reloc(file, insn);
	struct symbol *sym = insn_call_dest(insn);

	/*
	 * NOTE(review): assumes a reloc always exists when the call dest was
	 * not resolved earlier — confirm insn_reloc() can't return NULL here.
	 */
	if (!sym)
		sym = reloc->sym;

	if (sym->static_call_tramp) {
		list_add_tail(&insn->call_node, &file->static_call_list);
		return 0;
	}

	if (sym->retpoline_thunk) {
		list_add_tail(&insn->call_node, &file->retpoline_call_list);
		return 0;
	}

	/*
	 * Many compilers cannot disable KCOV or sanitizer calls with a function
	 * attribute so they need a little help, NOP out any such calls from
	 * noinstr text.
	 */
	if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) {
		if (reloc)
			set_reloc_type(file->elf, reloc, R_NONE);

		/* A tail call becomes a RET; a normal call becomes a NOP. */
		if (elf_write_insn(file->elf, insn->sec,
				   insn->offset, insn->len,
				   sibling ? arch_ret_insn(insn->len)
				           : arch_nop_insn(insn->len))) {
			return -1;
		}

		insn->type = sibling ? INSN_RETURN : INSN_NOP;

		if (sibling) {
			/*
			 * We've replaced the tail-call JMP insn by two new
			 * insn: RET; INT3, except we only have a single struct
			 * insn here. Mark it retpoline_safe to avoid the SLS
			 * warning, instead of adding another insn.
			 */
			insn->retpoline_safe = true;
		}

		return 0;
	}

	if (opts.mcount && sym->fentry) {
		if (sibling)
			WARN_INSN(insn, "tail call to __fentry__ !?!?");
		if (opts.mnop) {
			/* --mnop: replace the fentry call with a NOP. */
			if (reloc)
				set_reloc_type(file->elf, reloc, R_NONE);

			if (elf_write_insn(file->elf, insn->sec,
					   insn->offset, insn->len,
					   arch_nop_insn(insn->len))) {
				return -1;
			}

			insn->type = INSN_NOP;
		}

		list_add_tail(&insn->call_node, &file->mcount_loc_list);
		return 0;
	}

	/* Record regular non-init calls for later analyses. */
	if (insn->type == INSN_CALL && !insn->sec->init &&
	    !insn->_call_dest->embedded_insn)
		list_add_tail(&insn->call_node, &file->call_list);

	/* Calls (not tail calls) to dead-end functions never return. */
	if (!sibling && dead_end_function(file, sym))
		insn->dead_end = true;

	return 0;
}
1421
1422
/*
 * Record @dest as the call destination of @insn and annotate the call site.
 * A NULL destination is simply recorded and treated as success.
 */
static int add_call_dest(struct objtool_file *file, struct instruction *insn,
			 struct symbol *dest, bool sibling)
{
	insn->_call_dest = dest;
	if (!dest)
		return 0;

	/*
	 * A regular CALL's stack impact is undone by the RETURN of the called
	 * function, so the stack_ops are dropped here.  Annotated
	 * intra-function calls keep theirs but get converted to JUMP; see
	 * read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	return annotate_call_site(file, insn, sibling);
}
1440
1441
static int add_retpoline_call(struct objtool_file *file, struct instruction *insn)
1442
{
1443
/*
1444
* Retpoline calls/jumps are really dynamic calls/jumps in disguise,
1445
* so convert them accordingly.
1446
*/
1447
switch (insn->type) {
1448
case INSN_CALL:
1449
insn->type = INSN_CALL_DYNAMIC;
1450
break;
1451
case INSN_JUMP_UNCONDITIONAL:
1452
insn->type = INSN_JUMP_DYNAMIC;
1453
break;
1454
case INSN_JUMP_CONDITIONAL:
1455
insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
1456
break;
1457
default:
1458
return 0;
1459
}
1460
1461
insn->retpoline_safe = true;
1462
1463
/*
1464
* Whatever stack impact regular CALLs have, should be undone
1465
* by the RETURN of the called function.
1466
*
1467
* Annotated intra-function calls retain the stack_ops but
1468
* are converted to JUMP, see read_intra_function_calls().
1469
*/
1470
remove_insn_ops(insn);
1471
1472
return annotate_call_site(file, insn, false);
1473
}
1474
1475
/*
 * A (tail) call to a return thunk is really just a RET in disguise:
 * reclassify the instruction, and optionally record it on the per-file
 * return thunk list.
 */
static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
{
	insn->type = INSN_RETURN;
	insn->retpoline_safe = true;

	if (!add)
		return;

	list_add_tail(&insn->call_node, &file->return_thunk_list);
}
1487
1488
static bool is_first_func_insn(struct objtool_file *file,
1489
struct instruction *insn)
1490
{
1491
struct symbol *func = insn_func(insn);
1492
1493
if (!func)
1494
return false;
1495
1496
if (insn->offset == func->offset)
1497
return true;
1498
1499
/* Allow direct CALL/JMP past ENDBR */
1500
if (opts.ibt) {
1501
struct instruction *prev = prev_insn_same_sym(file, insn);
1502
1503
if (prev && prev->type == INSN_ENDBR &&
1504
insn->offset == func->offset + prev->len)
1505
return true;
1506
}
1507
1508
return false;
1509
}
1510
1511
/*
 * Find the destination instructions for all jumps.
 *
 * Static jumps are resolved either via their relocation or, absent one, via
 * the encoded immediate.  Jumps to retpoline/return thunks are converted to
 * their real semantics; cross-function jumps to a function's first
 * instruction are treated as sibling calls.  Returns 0 on success.
 */
static int add_jump_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	struct reloc *reloc;

	for_each_insn(file, insn) {
		struct symbol *func = insn_func(insn);
		struct instruction *dest_insn;
		struct section *dest_sec;
		struct symbol *dest_sym;
		unsigned long dest_off;

		if (!is_static_jump(insn))
			continue;

		if (insn->jump_dest) {
			/*
			 * handle_group_alt() may have previously set
			 * 'jump_dest' for some alternatives.
			 */
			continue;
		}

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			/* No reloc: destination is in the same section. */
			dest_sec = insn->sec;
			dest_off = arch_jump_destination(insn);
			dest_sym = dest_sec->sym;
		} else {
			dest_sym = reloc->sym;
			if (is_undef_sym(dest_sym)) {
				if (dest_sym->retpoline_thunk) {
					if (add_retpoline_call(file, insn))
						return -1;
					continue;
				}

				if (dest_sym->return_thunk) {
					add_return_call(file, insn, true);
					continue;
				}

				/* External symbol */
				if (func) {
					/* External sibling call */
					if (add_call_dest(file, insn, dest_sym, true))
						return -1;
					continue;
				}

				/* Non-func asm code jumping to external symbol */
				continue;
			}

			dest_sec = dest_sym->sec;
			dest_off = dest_sym->offset + arch_insn_adjusted_addend(insn, reloc);
		}

		dest_insn = find_insn(file, dest_sec, dest_off);
		if (!dest_insn) {
			struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);

			/*
			 * retbleed_untrain_ret() jumps to
			 * __x86_return_thunk(), but objtool can't find
			 * the thunk's starting RET instruction,
			 * because the RET is also in the middle of
			 * another instruction. Objtool only knows
			 * about the outer instruction.
			 */
			if (sym && sym->embedded_insn) {
				add_return_call(file, insn, false);
				continue;
			}

			/*
			 * GCOV/KCOV dead code can jump to the end of
			 * the function/section.
			 */
			if (file->ignore_unreachables && func &&
			    dest_sec == insn->sec &&
			    dest_off == func->offset + func->len)
				continue;

			ERROR_INSN(insn, "can't find jump dest instruction at %s",
				   offstr(dest_sec, dest_off));
			return -1;
		}

		/* Resolve section symbols to the destination's own symbol. */
		if (!dest_sym || is_sec_sym(dest_sym)) {
			dest_sym = dest_insn->sym;
			if (!dest_sym)
				goto set_jump_dest;
		}

		if (dest_sym->retpoline_thunk && dest_insn->offset == dest_sym->offset) {
			if (add_retpoline_call(file, insn))
				return -1;
			continue;
		}

		if (dest_sym->return_thunk && dest_insn->offset == dest_sym->offset) {
			add_return_call(file, insn, true);
			continue;
		}

		if (!insn->sym || insn->sym->pfunc == dest_sym->pfunc)
			goto set_jump_dest;

		/*
		 * Internal cross-function jump.
		 */

		if (is_first_func_insn(file, dest_insn)) {
			/* Internal sibling call */
			if (add_call_dest(file, insn, dest_sym, true))
				return -1;
			continue;
		}

set_jump_dest:
		insn->jump_dest = dest_insn;
	}

	return 0;
}
1640
1641
/*
 * Resolve a call target at @sec+@offset, preferring a function symbol and
 * falling back to any symbol covering the offset.
 */
static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
{
	struct symbol *sym = find_func_by_offset(sec, offset);

	return sym ? sym : find_symbol_by_offset(sec, offset);
}
1651
1652
/*
 * Find the destination instructions for all calls.
 *
 * Direct CALLs are resolved via their relocation or encoded immediate;
 * retpoline-thunk targets are converted to dynamic calls.  Returns 0 on
 * success, -1 on error.
 */
static int add_call_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	unsigned long dest_off;
	struct symbol *dest;
	struct reloc *reloc;

	for_each_insn(file, insn) {
		struct symbol *func = insn_func(insn);
		if (insn->type != INSN_CALL)
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			/* No reloc: destination is in the same section. */
			dest_off = arch_jump_destination(insn);
			dest = find_call_destination(insn->sec, dest_off);

			if (add_call_dest(file, insn, dest, false))
				return -1;

			if (func && func->ignore)
				continue;

			/* Unresolved same-section calls need an annotation. */
			if (!insn_call_dest(insn)) {
				ERROR_INSN(insn, "unannotated intra-function call");
				return -1;
			}

			if (func && !is_func_sym(insn_call_dest(insn))) {
				ERROR_INSN(insn, "unsupported call to non-function");
				return -1;
			}

		} else if (is_sec_sym(reloc->sym)) {
			/* Section-relative reloc: resolve the real symbol. */
			dest_off = arch_insn_adjusted_addend(insn, reloc);
			dest = find_call_destination(reloc->sym->sec, dest_off);
			if (!dest) {
				ERROR_INSN(insn, "can't find call dest symbol at %s+0x%lx",
					   reloc->sym->sec->name, dest_off);
				return -1;
			}

			if (add_call_dest(file, insn, dest, false))
				return -1;

		} else if (reloc->sym->retpoline_thunk) {
			if (add_retpoline_call(file, insn))
				return -1;

		} else {
			if (add_call_dest(file, insn, reloc->sym, false))
				return -1;
		}
	}

	return 0;
}
1712
1713
/*
1714
* The .alternatives section requires some extra special care over and above
1715
* other special sections because alternatives are patched in place.
1716
*/
1717
static int handle_group_alt(struct objtool_file *file,
1718
struct special_alt *special_alt,
1719
struct instruction *orig_insn,
1720
struct instruction **new_insn)
1721
{
1722
struct instruction *last_new_insn = NULL, *insn, *nop = NULL;
1723
struct alt_group *orig_alt_group, *new_alt_group;
1724
unsigned long dest_off;
1725
1726
orig_alt_group = orig_insn->alt_group;
1727
if (!orig_alt_group) {
1728
struct instruction *last_orig_insn = NULL;
1729
1730
orig_alt_group = calloc(1, sizeof(*orig_alt_group));
1731
if (!orig_alt_group) {
1732
ERROR_GLIBC("calloc");
1733
return -1;
1734
}
1735
orig_alt_group->cfi = calloc(special_alt->orig_len,
1736
sizeof(struct cfi_state *));
1737
if (!orig_alt_group->cfi) {
1738
ERROR_GLIBC("calloc");
1739
return -1;
1740
}
1741
1742
insn = orig_insn;
1743
sec_for_each_insn_from(file, insn) {
1744
if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
1745
break;
1746
1747
insn->alt_group = orig_alt_group;
1748
last_orig_insn = insn;
1749
}
1750
orig_alt_group->orig_group = NULL;
1751
orig_alt_group->first_insn = orig_insn;
1752
orig_alt_group->last_insn = last_orig_insn;
1753
orig_alt_group->nop = NULL;
1754
orig_alt_group->ignore = orig_insn->ignore_alts;
1755
orig_alt_group->feature = 0;
1756
} else {
1757
if (orig_alt_group->last_insn->offset + orig_alt_group->last_insn->len -
1758
orig_alt_group->first_insn->offset != special_alt->orig_len) {
1759
ERROR_INSN(orig_insn, "weirdly overlapping alternative! %ld != %d",
1760
orig_alt_group->last_insn->offset +
1761
orig_alt_group->last_insn->len -
1762
orig_alt_group->first_insn->offset,
1763
special_alt->orig_len);
1764
return -1;
1765
}
1766
}
1767
1768
new_alt_group = calloc(1, sizeof(*new_alt_group));
1769
if (!new_alt_group) {
1770
ERROR_GLIBC("calloc");
1771
return -1;
1772
}
1773
1774
if (special_alt->new_len < special_alt->orig_len) {
1775
/*
1776
* Insert a fake nop at the end to make the replacement
1777
* alt_group the same size as the original. This is needed to
1778
* allow propagate_alt_cfi() to do its magic. When the last
1779
* instruction affects the stack, the instruction after it (the
1780
* nop) will propagate the new state to the shared CFI array.
1781
*/
1782
nop = calloc(1, sizeof(*nop));
1783
if (!nop) {
1784
ERROR_GLIBC("calloc");
1785
return -1;
1786
}
1787
memset(nop, 0, sizeof(*nop));
1788
1789
nop->sec = special_alt->new_sec;
1790
nop->offset = special_alt->new_off + special_alt->new_len;
1791
nop->len = special_alt->orig_len - special_alt->new_len;
1792
nop->type = INSN_NOP;
1793
nop->sym = orig_insn->sym;
1794
nop->alt_group = new_alt_group;
1795
nop->fake = 1;
1796
}
1797
1798
if (!special_alt->new_len) {
1799
*new_insn = nop;
1800
goto end;
1801
}
1802
1803
insn = *new_insn;
1804
sec_for_each_insn_from(file, insn) {
1805
struct reloc *alt_reloc;
1806
1807
if (insn->offset >= special_alt->new_off + special_alt->new_len)
1808
break;
1809
1810
last_new_insn = insn;
1811
1812
insn->sym = orig_insn->sym;
1813
insn->alt_group = new_alt_group;
1814
1815
/*
1816
* Since alternative replacement code is copy/pasted by the
1817
* kernel after applying relocations, generally such code can't
1818
* have relative-address relocation references to outside the
1819
* .altinstr_replacement section, unless the arch's
1820
* alternatives code can adjust the relative offsets
1821
* accordingly.
1822
*/
1823
alt_reloc = insn_reloc(file, insn);
1824
if (alt_reloc && arch_pc_relative_reloc(alt_reloc) &&
1825
!arch_support_alt_relocation(special_alt, insn, alt_reloc)) {
1826
1827
ERROR_INSN(insn, "unsupported relocation in alternatives section");
1828
return -1;
1829
}
1830
1831
if (!is_static_jump(insn))
1832
continue;
1833
1834
if (!insn->immediate)
1835
continue;
1836
1837
dest_off = arch_jump_destination(insn);
1838
if (dest_off == special_alt->new_off + special_alt->new_len) {
1839
insn->jump_dest = next_insn_same_sec(file, orig_alt_group->last_insn);
1840
if (!insn->jump_dest) {
1841
ERROR_INSN(insn, "can't find alternative jump destination");
1842
return -1;
1843
}
1844
}
1845
}
1846
1847
if (!last_new_insn) {
1848
ERROR_FUNC(special_alt->new_sec, special_alt->new_off,
1849
"can't find last new alternative instruction");
1850
return -1;
1851
}
1852
1853
end:
1854
new_alt_group->orig_group = orig_alt_group;
1855
new_alt_group->first_insn = *new_insn;
1856
new_alt_group->last_insn = last_new_insn;
1857
new_alt_group->nop = nop;
1858
new_alt_group->ignore = (*new_insn)->ignore_alts;
1859
new_alt_group->cfi = orig_alt_group->cfi;
1860
new_alt_group->feature = special_alt->feature;
1861
return 0;
1862
}
1863
1864
/*
 * A jump table entry can either convert a nop to a jump or a jump to a nop.
 * If the original instruction is a jump, make the alt entry an effective nop
 * by just skipping the original instruction.
 */
static int handle_jump_alt(struct objtool_file *file,
			   struct special_alt *special_alt,
			   struct instruction *orig_insn,
			   struct instruction **new_insn)
{
	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
	    orig_insn->type != INSN_NOP) {

		ERROR_INSN(orig_insn, "unsupported instruction at jump label");
		return -1;
	}

	/* --hack-jump-label: rewrite eligible jump labels to NOPs in place. */
	if (opts.hack_jump_label && special_alt->key_addend & 2) {
		struct reloc *reloc = insn_reloc(file, orig_insn);

		if (reloc)
			set_reloc_type(file->elf, reloc, R_NONE);

		if (elf_write_insn(file->elf, orig_insn->sec,
				   orig_insn->offset, orig_insn->len,
				   arch_nop_insn(orig_insn->len))) {
			return -1;
		}

		orig_insn->type = INSN_NOP;
	}

	if (orig_insn->type == INSN_NOP) {
		/* Keep per-file NOP stats by encoding length. */
		if (orig_insn->len == 2)
			file->jl_nop_short++;
		else
			file->jl_nop_long++;

		return 0;
	}

	/* Keep per-file JMP stats by encoding length. */
	if (orig_insn->len == 2)
		file->jl_short++;
	else
		file->jl_long++;

	/* Skipping the jump makes the alternative an effective NOP. */
	*new_insn = next_insn_same_sec(file, orig_insn);
	return 0;
}
1913
1914
/*
 * Read all the special sections which have alternate instructions which can be
 * patched in or redirected to at runtime. Each instruction having alternate
 * instruction(s) has them added to its insn->alts list, which will be
 * traversed in validate_branch().
 */
static int add_special_section_alts(struct objtool_file *file)
{
	struct list_head special_alts;
	struct instruction *orig_insn, *new_insn;
	struct special_alt *special_alt, *tmp;
	enum alternative_type alt_type;
	struct alternative *alt;
	struct alternative *a;

	if (special_get_alts(file->elf, &special_alts))
		return -1;

	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {

		orig_insn = find_insn(file, special_alt->orig_sec,
				      special_alt->orig_off);
		if (!orig_insn) {
			ERROR_FUNC(special_alt->orig_sec, special_alt->orig_off,
				   "special: can't find orig instruction");
			return -1;
		}

		/* Group alts with zero new_len have no replacement insn. */
		new_insn = NULL;
		if (!special_alt->group || special_alt->new_len) {
			new_insn = find_insn(file, special_alt->new_sec,
					     special_alt->new_off);
			if (!new_insn) {
				ERROR_FUNC(special_alt->new_sec, special_alt->new_off,
					   "special: can't find new instruction");
				return -1;
			}
		}

		if (special_alt->group) {
			if (!special_alt->orig_len) {
				ERROR_INSN(orig_insn, "empty alternative entry");
				continue;
			}

			if (handle_group_alt(file, special_alt, orig_insn, &new_insn))
				return -1;

			alt_type = ALT_TYPE_INSTRUCTIONS;

		} else if (special_alt->jump_or_nop) {
			if (handle_jump_alt(file, special_alt, orig_insn, &new_insn))
				return -1;

			alt_type = ALT_TYPE_JUMP_TABLE;
		} else {
			alt_type = ALT_TYPE_EX_TABLE;
		}

		alt = calloc(1, sizeof(*alt));
		if (!alt) {
			ERROR_GLIBC("calloc");
			return -1;
		}

		alt->insn = new_insn;
		alt->type = alt_type;
		alt->next = NULL;

		/*
		 * Store alternatives in the same order they have been
		 * defined.
		 */
		if (!orig_insn->alts) {
			orig_insn->alts = alt;
		} else {
			/* Append at the tail of the singly-linked list. */
			for (a = orig_insn->alts; a->next; a = a->next)
				;
			a->next = alt;
		}

		list_del(&special_alt->list);
		free(special_alt);
	}

	if (opts.stats) {
		printf("jl\\\tNOP\tJMP\n");
		printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
		printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
	}

	return 0;
}
2007
2008
/*
 * Compute the symbol offset a jump-table entry reloc points to.  Weak
 * default: symbol offset plus reloc addend; arch code may override.
 */
__weak unsigned long arch_jump_table_sym_offset(struct reloc *reloc, struct reloc *table)
{
	return reloc->sym->offset + reloc_addend(reloc);
}
2012
2013
/*
 * Walk the switch jump table associated with @insn and add each valid target
 * instruction as an alternative on insn->alts.  Returns 0 on success, -1 if
 * no table entry could be found or on allocation failure.
 */
static int add_jump_table(struct objtool_file *file, struct instruction *insn)
{
	unsigned long table_size = insn_jump_table_size(insn);
	struct symbol *pfunc = insn_func(insn)->pfunc;
	struct reloc *table = insn_jump_table(insn);
	struct instruction *dest_insn;
	unsigned int prev_offset = 0;
	struct reloc *reloc = table;
	struct alternative *alt;
	unsigned long sym_offset;

	/*
	 * Each @reloc is a switch table relocation which points to the target
	 * instruction.
	 */
	for_each_reloc_from(table->sec, reloc) {

		/* Check for the end of the table: */
		if (table_size && reloc_offset(reloc) - reloc_offset(table) >= table_size)
			break;
		if (reloc != table && is_jump_table(reloc))
			break;

		/* Make sure the table entries are consecutive: */
		if (prev_offset && reloc_offset(reloc) != prev_offset + arch_reloc_size(reloc))
			break;

		sym_offset = arch_jump_table_sym_offset(reloc, table);

		/* Detect function pointers from contiguous objects: */
		if (reloc->sym->sec == pfunc->sec && sym_offset == pfunc->offset)
			break;

		/*
		 * Clang sometimes leaves dangling unused jump table entries
		 * which point to the end of the function. Ignore them.
		 */
		if (reloc->sym->sec == pfunc->sec &&
		    sym_offset == pfunc->offset + pfunc->len)
			goto next;

		dest_insn = find_insn(file, reloc->sym->sec, sym_offset);
		if (!dest_insn)
			break;

		/* Make sure the destination is in the same function: */
		if (!insn_func(dest_insn) || insn_func(dest_insn)->pfunc != pfunc)
			break;

		alt = calloc(1, sizeof(*alt));
		if (!alt) {
			ERROR_GLIBC("calloc");
			return -1;
		}

		/* Prepend the target to the insn's alternatives list. */
		alt->insn = dest_insn;
		alt->next = insn->alts;
		insn->alts = alt;
next:
		prev_offset = reloc_offset(reloc);
	}

	/* prev_offset == 0 means not a single entry was accepted. */
	if (!prev_offset) {
		ERROR_INSN(insn, "can't find switch jump table");
		return -1;
	}

	return 0;
}
2082
2083
/*
 * find_jump_table() - Given a dynamic jump, find the switch jump table
 * associated with it.
 *
 * On success, records the table reloc and size on @insn (via _jump_table /
 * _jump_table_size) and marks the reloc as a jump table head.
 */
static void find_jump_table(struct objtool_file *file, struct symbol *func,
			    struct instruction *insn)
{
	struct reloc *table_reloc;
	struct instruction *dest_insn, *orig_insn = insn;
	unsigned long table_size;
	unsigned long sym_offset;

	/*
	 * Backward search using the @first_jump_src links, these help avoid
	 * much of the 'in between' code. Which avoids us getting confused by
	 * it.
	 */
	for (;
	     insn && insn_func(insn) && insn_func(insn)->pfunc == func;
	     insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {

		/* Stop at an earlier dynamic jump: that's another table. */
		if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
			break;

		/* allow small jumps within the range */
		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
		    insn->jump_dest &&
		    (insn->jump_dest->offset <= insn->offset ||
		     insn->jump_dest->offset > orig_insn->offset))
			break;

		table_reloc = arch_find_switch_table(file, insn, &table_size);
		if (!table_reloc)
			continue;

		sym_offset = table_reloc->sym->offset + reloc_addend(table_reloc);

		/* First table entry must land inside this function. */
		dest_insn = find_insn(file, table_reloc->sym->sec, sym_offset);
		if (!dest_insn || !insn_func(dest_insn) || insn_func(dest_insn)->pfunc != func)
			continue;

		set_jump_table(table_reloc);
		orig_insn->_jump_table = table_reloc;
		orig_insn->_jump_table_size = table_size;

		break;
	}
}
2131
2132
/*
 * First pass: Mark the head of each jump table so that in the next pass,
 * we know when a given jump table ends and the next one starts.
 */
static void mark_func_jump_tables(struct objtool_file *file,
				  struct symbol *func)
{
	struct instruction *insn, *last = NULL;

	func_for_each_insn(file, func, insn) {
		if (!last)
			last = insn;

		/*
		 * Store back-pointers for unconditional forward jumps such
		 * that find_jump_table() can back-track using those and
		 * avoid some potentially confusing code.
		 */
		if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
		    insn->offset > last->offset &&
		    insn->jump_dest->offset > insn->offset &&
		    !insn->jump_dest->first_jump_src) {

			insn->jump_dest->first_jump_src = insn;
			last = insn->jump_dest;
		}

		/* Only dynamic jumps can be dispatched via a jump table. */
		if (insn->type != INSN_JUMP_DYNAMIC)
			continue;

		find_jump_table(file, func, insn);
	}
}
2165
2166
static int add_func_jump_tables(struct objtool_file *file,
2167
struct symbol *func)
2168
{
2169
struct instruction *insn;
2170
2171
func_for_each_insn(file, func, insn) {
2172
if (!insn_jump_table(insn))
2173
continue;
2174
2175
if (add_jump_table(file, insn))
2176
return -1;
2177
}
2178
2179
return 0;
2180
}
2181
2182
/*
2183
* For some switch statements, gcc generates a jump table in the .rodata
2184
* section which contains a list of addresses within the function to jump to.
2185
* This finds these jump tables and adds them to the insn->alts lists.
2186
*/
2187
static int add_jump_table_alts(struct objtool_file *file)
2188
{
2189
struct symbol *func;
2190
2191
if (!file->rodata)
2192
return 0;
2193
2194
for_each_sym(file->elf, func) {
2195
if (!is_func_sym(func) || func->alias != func)
2196
continue;
2197
2198
mark_func_jump_tables(file, func);
2199
if (add_func_jump_tables(file, func))
2200
return -1;
2201
}
2202
2203
return 0;
2204
}
2205
2206
static void set_func_state(struct cfi_state *state)
2207
{
2208
state->cfa = initial_func_cfi.cfa;
2209
memcpy(&state->regs, &initial_func_cfi.regs,
2210
CFI_NUM_REGS * sizeof(struct cfi_reg));
2211
state->stack_size = initial_func_cfi.cfa.offset;
2212
state->type = UNWIND_HINT_TYPE_CALL;
2213
}
2214
2215
/*
 * Parse the .discard.unwind_hints section and attach the hinted CFI state to
 * each target instruction.  Returns 0 on success (including when the section
 * is absent), -1 on malformed input.
 */
static int read_unwind_hints(struct objtool_file *file)
{
	struct cfi_state cfi = init_cfi;
	struct section *sec;
	struct unwind_hint *hint;
	struct instruction *insn;
	struct reloc *reloc;
	unsigned long offset;
	int i;

	sec = find_section_by_name(file->elf, ".discard.unwind_hints");
	if (!sec)
		return 0;

	if (!sec->rsec) {
		ERROR("missing .rela.discard.unwind_hints section");
		return -1;
	}

	/* The section must be an array of struct unwind_hint entries. */
	if (sec_size(sec) % sizeof(struct unwind_hint)) {
		ERROR("struct unwind_hint size mismatch");
		return -1;
	}

	file->hints = true;

	for (i = 0; i < sec_size(sec) / sizeof(struct unwind_hint); i++) {
		hint = (struct unwind_hint *)sec->data->d_buf + i;

		reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
		if (!reloc) {
			ERROR("can't find reloc for unwind_hints[%d]", i);
			return -1;
		}

		offset = reloc->sym->offset + reloc_addend(reloc);

		insn = find_insn(file, reloc->sym->sec, offset);
		if (!insn) {
			ERROR("can't find insn for unwind_hints[%d]", i);
			return -1;
		}

		insn->hint = true;

		if (hint->type == UNWIND_HINT_TYPE_UNDEFINED) {
			/* Shared "undefined" CFI state, no hash lookup needed. */
			insn->cfi = &force_undefined_cfi;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_SAVE) {
			insn->hint = false;
			insn->save = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
			insn->restore = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
			struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);

			/* Global entry points need ENDBR under IBT. */
			if (sym && is_global_sym(sym)) {
				if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) {
					ERROR_INSN(insn, "UNWIND_HINT_IRET_REGS without ENDBR");
					return -1;
				}
			}
		}

		if (hint->type == UNWIND_HINT_TYPE_FUNC) {
			/* Shared default function CFI state. */
			insn->cfi = &func_cfi;
			continue;
		}

		/* Start from any CFI already attached to the instruction. */
		if (insn->cfi)
			cfi = *(insn->cfi);

		if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
			ERROR_INSN(insn, "unsupported unwind_hint sp base reg %d", hint->sp_reg);
			return -1;
		}

		cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset);
		cfi.type = hint->type;
		cfi.signal = hint->signal;

		insn->cfi = cfi_hash_find_or_add(&cfi);
	}

	return 0;
}
2309
2310
/*
 * Walk the .discard.annotate_insn section and invoke @func for each
 * (type, instruction) annotation entry.  Returns 0 on success (including
 * when the section or its relocs are absent), -1 on error.
 */
static int read_annotate(struct objtool_file *file,
			 int (*func)(struct objtool_file *file, int type, struct instruction *insn))
{
	struct section *sec;
	struct instruction *insn;
	struct reloc *reloc;
	uint64_t offset;
	int type;

	sec = find_section_by_name(file->elf, ".discard.annotate_insn");
	if (!sec)
		return 0;

	if (!sec->rsec)
		return 0;

	/* Work around linkers that emit a wrong entry size for the section. */
	if (sec->sh.sh_entsize != 8) {
		static bool warned = false;
		if (!warned && opts.verbose) {
			WARN("%s: dodgy linker, sh_entsize != 8", sec->name);
			warned = true;
		}
		sec->sh.sh_entsize = 8;
	}

	if (sec_num_entries(sec) != sec_num_entries(sec->rsec)) {
		ERROR("bad .discard.annotate_insn section: missing relocs");
		return -1;
	}

	for_each_reloc(sec->rsec, reloc) {
		type = annotype(file->elf, sec, reloc);
		offset = reloc->sym->offset + reloc_addend(reloc);
		insn = find_insn(file, reloc->sym->sec, offset);

		if (!insn) {
			ERROR("bad .discard.annotate_insn entry: %d of type %d", reloc_idx(reloc), type);
			return -1;
		}

		if (func(file, type, insn))
			return -1;
	}

	return 0;
}
2356
2357
static int __annotate_early(struct objtool_file *file, int type, struct instruction *insn)
2358
{
2359
switch (type) {
2360
2361
/* Must be before add_special_section_alts() */
2362
case ANNOTYPE_IGNORE_ALTS:
2363
insn->ignore_alts = true;
2364
break;
2365
2366
/*
2367
* Must be before read_unwind_hints() since that needs insn->noendbr.
2368
*/
2369
case ANNOTYPE_NOENDBR:
2370
insn->noendbr = 1;
2371
break;
2372
2373
default:
2374
break;
2375
}
2376
2377
return 0;
2378
}
2379
2380
static int __annotate_ifc(struct objtool_file *file, int type, struct instruction *insn)
2381
{
2382
unsigned long dest_off;
2383
2384
if (type != ANNOTYPE_INTRA_FUNCTION_CALL)
2385
return 0;
2386
2387
if (insn->type != INSN_CALL) {
2388
ERROR_INSN(insn, "intra_function_call not a direct call");
2389
return -1;
2390
}
2391
2392
/*
2393
* Treat intra-function CALLs as JMPs, but with a stack_op.
2394
* See add_call_destinations(), which strips stack_ops from
2395
* normal CALLs.
2396
*/
2397
insn->type = INSN_JUMP_UNCONDITIONAL;
2398
2399
dest_off = arch_jump_destination(insn);
2400
insn->jump_dest = find_insn(file, insn->sec, dest_off);
2401
if (!insn->jump_dest) {
2402
ERROR_INSN(insn, "can't find call dest at %s+0x%lx",
2403
insn->sec->name, dest_off);
2404
return -1;
2405
}
2406
2407
return 0;
2408
}
2409
2410
/*
 * Late annotation pass: apply all remaining annotation types to the target
 * instruction.  Types handled by earlier passes are accepted and skipped;
 * unknown types are an error.
 */
static int __annotate_late(struct objtool_file *file, int type, struct instruction *insn)
{
	struct symbol *sym;

	switch (type) {
	case ANNOTYPE_NOENDBR:
		/* early */
		break;

	case ANNOTYPE_RETPOLINE_SAFE:
		if (insn->type != INSN_JUMP_DYNAMIC &&
		    insn->type != INSN_CALL_DYNAMIC &&
		    insn->type != INSN_RETURN &&
		    insn->type != INSN_NOP) {
			ERROR_INSN(insn, "retpoline_safe hint not an indirect jump/call/ret/nop");
			return -1;
		}

		insn->retpoline_safe = true;
		break;

	case ANNOTYPE_INSTR_BEGIN:
		/* instr is a nesting counter; BEGIN/END must balance. */
		insn->instr++;
		break;

	case ANNOTYPE_INSTR_END:
		insn->instr--;
		break;

	case ANNOTYPE_UNRET_BEGIN:
		insn->unret = 1;
		break;

	case ANNOTYPE_IGNORE_ALTS:
		/* early */
		break;

	case ANNOTYPE_INTRA_FUNCTION_CALL:
		/* ifc */
		break;

	case ANNOTYPE_REACHABLE:
		insn->dead_end = false;
		break;

	case ANNOTYPE_NOCFI:
		/* The annotation applies to the containing symbol. */
		sym = insn->sym;
		if (!sym) {
			ERROR_INSN(insn, "dodgy NOCFI annotation");
			return -1;
		}
		insn->sym->nocfi = 1;
		break;

	default:
		ERROR_INSN(insn, "Unknown annotation type: %d", type);
		return -1;
	}

	return 0;
}
2471
2472
/*
 * Return true if name matches an instrumentation function, where calls to that
 * function from noinstr code can safely be removed, but compilers won't do so.
 */
static bool is_profiling_func(const char *name)
{
	/*
	 * Many compilers cannot disable KCOV with a function attribute.
	 */
	return strncmp(name, "__sanitizer_cov_", 16) == 0;
}
2486
2487
/*
 * Walk all ELF symbols and set classification flags (static-call trampoline,
 * retpoline/return thunk, ftrace entry, profiling func, ...) that later passes
 * depend on.  Must run before add_jump_destinations()/add_call_destinations().
 *
 * Also records the longest symbol name seen, in sym_name_max_len.
 */
static int classify_symbols(struct objtool_file *file)
{
	struct symbol *func;
	size_t len;

	for_each_sym(file->elf, func) {
		/* Assembler-generated ".L" locals are not real functions. */
		if (is_notype_sym(func) && strstarts(func->name, ".L"))
			func->local_label = true;

		/* All classifications below apply to global symbols only. */
		if (!is_global_sym(func))
			continue;

		if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
			     strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
			func->static_call_tramp = true;

		/* Arch-specific name matching for the speculation thunks. */
		if (arch_is_retpoline(func))
			func->retpoline_thunk = true;

		if (arch_is_rethunk(func))
			func->return_thunk = true;

		if (arch_is_embedded_insn(func))
			func->embedded_insn = true;

		if (arch_ftrace_match(func->name))
			func->fentry = true;

		if (is_profiling_func(func->name))
			func->profiling_func = true;

		/* Track the widest name for aligned diagnostic output. */
		len = strlen(func->name);
		if (len > sym_name_max_len)
			sym_name_max_len = len;
	}

	return 0;
}
2525
2526
static void mark_rodata(struct objtool_file *file)
2527
{
2528
struct section *sec;
2529
bool found = false;
2530
2531
/*
2532
* Search for the following rodata sections, each of which can
2533
* potentially contain jump tables:
2534
*
2535
* - .rodata: can contain GCC switch tables
2536
* - .rodata.<func>: same, if -fdata-sections is being used
2537
* - .data.rel.ro.c_jump_table: contains C annotated jump tables
2538
*
2539
* .rodata.str1.* sections are ignored; they don't contain jump tables.
2540
*/
2541
for_each_sec(file->elf, sec) {
2542
if ((!strncmp(sec->name, ".rodata", 7) &&
2543
!strstr(sec->name, ".str1.")) ||
2544
!strncmp(sec->name, ".data.rel.ro", 12)) {
2545
sec->rodata = true;
2546
found = true;
2547
}
2548
}
2549
2550
file->rodata = found;
2551
}
2552
2553
/*
 * Mark instructions that live in symbol "holes" (code not covered by any
 * symbol), which in whole-archive (vmlinux) runs is typically dead code left
 * behind after the linker discarded a weak symbol.  Must run after
 * add_jump_destinations() since it inspects insn->jump_dest.
 */
static void mark_holes(struct objtool_file *file)
{
	struct instruction *insn;
	bool in_hole = false;

	/* Only meaningful for linked (whole archive) runs. */
	if (!opts.link)
		return;

	/*
	 * Whole archive runs might encounter dead code from weak symbols.
	 * This is where the linker will have dropped the weak symbol in
	 * favour of a regular symbol, but leaves the code in place.
	 */
	for_each_insn(file, insn) {
		if (insn->sym || !find_symbol_hole_containing(insn->sec, insn->offset)) {
			in_hole = false;
			continue;
		}

		/* Skip function padding and pfx code */
		if (!in_hole && insn->type == INSN_NOP)
			continue;

		in_hole = true;
		insn->hole = 1;

		/*
		 * If this hole jumps to a .cold function, mark it ignore.
		 */
		if (insn->jump_dest) {
			struct symbol *dest_func = insn_func(insn->jump_dest);

			if (dest_func && dest_func->cold)
				dest_func->ignore = true;
		}
	}
}
2590
2591
static bool validate_branch_enabled(void)
2592
{
2593
return opts.stackval ||
2594
opts.orc ||
2595
opts.uaccess ||
2596
opts.checksum;
2597
}
2598
2599
/*
 * Top-level decode pipeline: classify symbols, decode instructions, resolve
 * jump/call destinations, and apply annotations.  The ordering of these steps
 * is significant; the inline comments document the ordering constraints.
 *
 * Returns 0 on success, -1 on any decode failure.
 */
static int decode_sections(struct objtool_file *file)
{
	file->klp = is_livepatch_module(file);

	mark_rodata(file);

	if (init_pv_ops(file))
		return -1;

	/*
	 * Must be before add_{jump_call}_destination.
	 */
	if (classify_symbols(file))
		return -1;

	if (decode_instructions(file))
		return -1;

	if (add_ignores(file))
		return -1;

	add_uaccess_safe(file);

	if (read_annotate(file, __annotate_early))
		return -1;

	/*
	 * Must be before add_jump_destinations(), which depends on 'func'
	 * being set for alternatives, to enable proper sibling call detection.
	 */
	if (validate_branch_enabled() || opts.noinstr || opts.hack_jump_label || opts.disas) {
		if (add_special_section_alts(file))
			return -1;
	}

	if (add_jump_destinations(file))
		return -1;

	/*
	 * Must be before add_call_destination(); it changes INSN_CALL to
	 * INSN_JUMP.
	 */
	if (read_annotate(file, __annotate_ifc))
		return -1;

	if (add_call_destinations(file))
		return -1;

	if (add_jump_table_alts(file))
		return -1;

	if (read_unwind_hints(file))
		return -1;

	/* Must be after add_jump_destinations() */
	mark_holes(file);

	/*
	 * Must be after add_call_destinations() such that it can override
	 * dead_end_function() marks.
	 */
	if (read_annotate(file, __annotate_late))
		return -1;

	return 0;
}
2665
2666
static bool is_special_call(struct instruction *insn)
2667
{
2668
if (insn->type == INSN_CALL) {
2669
struct symbol *dest = insn_call_dest(insn);
2670
2671
if (!dest)
2672
return false;
2673
2674
if (dest->fentry || dest->embedded_insn)
2675
return true;
2676
}
2677
2678
return false;
2679
}
2680
2681
/*
 * Return true if the current CFI state deviates in any way from the canonical
 * state at function entry (initial_func_cfi): CFA base/offset changed, DRAP in
 * use, tracked stack size changed, or any callee-saved register relocated.
 */
static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
{
	struct cfi_state *cfi = &state->cfi;
	int i;

	if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
		return true;

	if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
		return true;

	/*
	 * NOTE(review): stack_size is deliberately compared against the
	 * *initial CFA offset* -- at function entry the CFA offset equals
	 * the tracked stack size, so this detects any net stack adjustment.
	 */
	if (cfi->stack_size != initial_func_cfi.cfa.offset)
		return true;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
		    cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
			return true;
	}

	return false;
}
2703
2704
static bool check_reg_frame_pos(const struct cfi_reg *reg,
2705
int expected_offset)
2706
{
2707
return reg->base == CFI_CFA &&
2708
reg->offset == expected_offset;
2709
}
2710
2711
/*
 * Return true if the CFI state describes a complete frame-pointer frame:
 * CFA based on BP with the saved BP at -cfa.offset and the return address one
 * 8-byte slot above it, or a DRAP frame with BP already restored.
 */
static bool has_valid_stack_frame(struct insn_state *state)
{
	struct cfi_state *cfi = &state->cfi;

	/* Standard frame: saved RA sits 8 bytes above the saved BP. */
	if (cfi->cfa.base == CFI_BP &&
	    check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
	    check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
		return true;

	/* DRAP frame with the frame pointer set up. */
	if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
		return true;

	return false;
}
2725
2726
/*
 * Simplified CFA tracking used inside UNWIND_HINT_TYPE_REGS regions (e.g.
 * entry code that saved a full pt_regs): only the CFA offset is adjusted for
 * pushes, pops and immediate SP adjustments; individual register locations
 * are not tracked here.
 */
static int update_cfi_state_regs(struct instruction *insn,
				  struct cfi_state *cfi,
				  struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;

	/* Nothing to track unless the CFA is SP-relative. */
	if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
		return 0;

	/* push */
	if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
		cfa->offset += 8;

	/* pop */
	if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
		cfa->offset -= 8;

	/* add immediate to sp */
	if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
	    op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
		cfa->offset -= op->src.offset;

	return 0;
}
2750
2751
static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
2752
{
2753
if (arch_callee_saved_reg(reg) &&
2754
cfi->regs[reg].base == CFI_UNDEFINED) {
2755
cfi->regs[reg].base = base;
2756
cfi->regs[reg].offset = offset;
2757
}
2758
}
2759
2760
/*
 * Mark callee-saved register @reg as restored by resetting its tracked
 * location back to the function-entry state.
 */
static void restore_reg(struct cfi_state *cfi, unsigned char reg)
{
	cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
	cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
}
2765
2766
/*
2767
* A note about DRAP stack alignment:
2768
*
2769
* GCC has the concept of a DRAP register, which is used to help keep track of
2770
* the stack pointer when aligning the stack. r10 or r13 is used as the DRAP
2771
* register. The typical DRAP pattern is:
2772
*
2773
* 4c 8d 54 24 08 lea 0x8(%rsp),%r10
2774
* 48 83 e4 c0 and $0xffffffffffffffc0,%rsp
2775
* 41 ff 72 f8 pushq -0x8(%r10)
2776
* 55 push %rbp
2777
* 48 89 e5 mov %rsp,%rbp
2778
* (more pushes)
2779
* 41 52 push %r10
2780
* ...
2781
* 41 5a pop %r10
2782
* (more pops)
2783
* 5d pop %rbp
2784
* 49 8d 62 f8 lea -0x8(%r10),%rsp
2785
* c3 retq
2786
*
2787
* There are some variations in the epilogues, like:
2788
*
2789
* 5b pop %rbx
2790
* 41 5a pop %r10
2791
* 41 5c pop %r12
2792
* 41 5d pop %r13
2793
* 41 5e pop %r14
2794
* c9 leaveq
2795
* 49 8d 62 f8 lea -0x8(%r10),%rsp
2796
* c3 retq
2797
*
2798
* and:
2799
*
2800
* 4c 8b 55 e8 mov -0x18(%rbp),%r10
2801
* 48 8b 5d e0 mov -0x20(%rbp),%rbx
2802
* 4c 8b 65 f0 mov -0x10(%rbp),%r12
2803
* 4c 8b 6d f8 mov -0x8(%rbp),%r13
2804
* c9 leaveq
2805
* 49 8d 62 f8 lea -0x8(%r10),%rsp
2806
* c3 retq
2807
*
2808
* Sometimes r13 is used as the DRAP register, in which case it's saved and
2809
* restored beforehand:
2810
*
2811
* 41 55 push %r13
2812
* 4c 8d 6c 24 10 lea 0x10(%rsp),%r13
2813
* 48 83 e4 f0 and $0xfffffffffffffff0,%rsp
2814
* ...
2815
* 49 8d 65 f0 lea -0x10(%r13),%rsp
2816
* 41 5d pop %r13
2817
* c3 retq
2818
*/
2819
/*
 * Core CFI state machine: apply a single decoded stack operation @op from
 * @insn to @cfi, tracking the CFA location, stack size, DRAP state and
 * callee-saved register save slots.  See the DRAP commentary above for the
 * register-aligned-stack patterns handled here.
 *
 * Returns 0 on success, 1 for a warned-but-continuable state, -1 on an
 * unsupported/unknown stack operation.
 */
static int update_cfi_state(struct instruction *insn,
			    struct instruction *next_insn,
			    struct cfi_state *cfi, struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;
	struct cfi_reg *regs = cfi->regs;

	/* ignore UNWIND_HINT_UNDEFINED regions */
	if (cfi->force_undefined)
		return 0;

	/* stack operations don't make sense with an undefined CFA */
	if (cfa->base == CFI_UNDEFINED) {
		if (insn_func(insn)) {
			WARN_INSN(insn, "undefined stack state");
			return 1;
		}
		return 0;
	}

	/* pt_regs regions get the simplified offset-only tracking. */
	if (cfi->type == UNWIND_HINT_TYPE_REGS ||
	    cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
		return update_cfi_state_regs(insn, cfi, op);

	switch (op->dest.type) {

	case OP_DEST_REG:
		switch (op->src.type) {

		case OP_SRC_REG:
			if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
			    cfa->base == CFI_SP &&
			    check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {

				/* mov %rsp, %rbp */
				cfa->base = op->dest.reg;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP &&
				 op->dest.reg == CFI_BP && cfi->drap) {

				/* drap: mov %rsp, %rbp */
				regs[CFI_BP].base = CFI_BP;
				regs[CFI_BP].offset = -cfi->stack_size;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/*
				 * mov %rsp, %reg
				 *
				 * This is needed for the rare case where GCC
				 * does:
				 *
				 *   mov    %rsp, %rax
				 *   ...
				 *   mov    %rax, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = -cfi->stack_size;
			}

			else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
				 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {

				/*
				 * mov %rbp, %rsp
				 *
				 * Restore the original stack pointer (Clang).
				 */
				cfi->stack_size = -cfi->regs[CFI_BP].offset;
			}

			else if (op->dest.reg == cfa->base) {

				/* mov %reg, %rsp */
				if (cfa->base == CFI_SP &&
				    cfi->vals[op->src.reg].base == CFI_CFA) {

					/*
					 * This is needed for the rare case
					 * where GCC does something dumb like:
					 *
					 *   lea    0x8(%rsp), %rcx
					 *   ...
					 *   mov    %rcx, %rsp
					 */
					cfa->offset = -cfi->vals[op->src.reg].offset;
					cfi->stack_size = cfa->offset;

				} else if (cfa->base == CFI_SP &&
					   cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
					   cfi->vals[op->src.reg].offset == cfa->offset) {

					/*
					 * Stack swizzle:
					 *
					 * 1: mov %rsp, (%[tos])
					 * 2: mov %[tos], %rsp
					 *    ...
					 * 3: pop %rsp
					 *
					 * Where:
					 *
					 * 1 - places a pointer to the previous
					 * stack at the Top-of-Stack of the
					 * new stack.
					 *
					 * 2 - switches to the new stack.
					 *
					 * 3 - pops the Top-of-Stack to restore
					 * the original stack.
					 *
					 * Note: we set base to SP_INDIRECT
					 * here and preserve offset. Therefore
					 * when the unwinder reaches ToS it
					 * will dereference SP and then add the
					 * offset to find the next frame, IOW:
					 * (%rsp) + offset.
					 */
					cfa->base = CFI_SP_INDIRECT;

				} else {
					cfa->base = CFI_UNDEFINED;
					cfa->offset = 0;
				}
			}

			else if (op->dest.reg == CFI_SP &&
				 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
				 cfi->vals[op->src.reg].offset == cfa->offset) {

				/*
				 * The same stack swizzle case 2) as above. But
				 * because we can't change cfa->base, case 3)
				 * will become a regular POP. Pretend we're a
				 * PUSH so things don't go unbalanced.
				 */
				cfi->stack_size += 8;
			}


			break;

		case OP_SRC_ADD:
			if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {

				/* add imm, %rsp */
				cfi->stack_size -= op->src.offset;
				if (cfa->base == CFI_SP)
					cfa->offset -= op->src.offset;
				break;
			}

			if (op->dest.reg == CFI_BP && op->src.reg == CFI_SP &&
			    insn->sym->frame_pointer) {
				/* addi.d fp,sp,imm on LoongArch */
				if (cfa->base == CFI_SP && cfa->offset == op->src.offset) {
					cfa->base = CFI_BP;
					cfa->offset = 0;
				}
				break;
			}

			if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
				/* addi.d sp,fp,imm on LoongArch */
				if (cfa->base == CFI_BP && cfa->offset == 0) {
					if (insn->sym->frame_pointer) {
						cfa->base = CFI_SP;
						cfa->offset = -op->src.offset;
					}
				} else {
					/* lea disp(%rbp), %rsp */
					cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
				}
				break;
			}

			if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/* drap: lea disp(%rsp), %drap */
				cfi->drap_reg = op->dest.reg;

				/*
				 * lea disp(%rsp), %reg
				 *
				 * This is needed for the rare case where GCC
				 * does something dumb like:
				 *
				 *   lea    0x8(%rsp), %rcx
				 *   ...
				 *   mov    %rcx, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = \
					-cfi->stack_size + op->src.offset;

				break;
			}

			if (cfi->drap && op->dest.reg == CFI_SP &&
			    op->src.reg == cfi->drap_reg) {

				 /* drap: lea disp(%drap), %rsp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size = -op->src.offset;
				cfi->drap_reg = CFI_UNDEFINED;
				cfi->drap = false;
				break;
			}

			/* An unwind hint on the next insn legitimizes the modification. */
			if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
				WARN_INSN(insn, "unsupported stack register modification");
				return -1;
			}

			break;

		case OP_SRC_AND:
			if (op->dest.reg != CFI_SP ||
			    (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
			    (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
				WARN_INSN(insn, "unsupported stack pointer realignment");
				return -1;
			}

			if (cfi->drap_reg != CFI_UNDEFINED) {
				/* drap: and imm, %rsp */
				cfa->base = cfi->drap_reg;
				cfa->offset = cfi->stack_size = 0;
				cfi->drap = true;
			}

			/*
			 * Older versions of GCC (4.8ish) realign the stack
			 * without DRAP, with a frame pointer.
			 */

			break;

		case OP_SRC_POP:
		case OP_SRC_POPF:
			if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {

				/* pop %rsp; # restore from a stack swizzle */
				cfa->base = CFI_SP;
				break;
			}

			if (!cfi->drap && op->dest.reg == cfa->base) {

				/* pop %rbp */
				cfa->base = CFI_SP;
			}

			if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
			    op->dest.reg == cfi->drap_reg &&
			    cfi->drap_offset == -cfi->stack_size) {

				/* drap: pop %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;

			} else if (cfi->stack_size == -regs[op->dest.reg].offset) {

				/* pop %reg */
				restore_reg(cfi, op->dest.reg);
			}

			cfi->stack_size -= 8;
			if (cfa->base == CFI_SP)
				cfa->offset -= 8;

			break;

		case OP_SRC_REG_INDIRECT:
			if (!cfi->drap && op->dest.reg == cfa->base &&
			    op->dest.reg == CFI_BP) {

				/* mov disp(%rsp), %rbp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == cfi->drap_offset) {

				/* drap: mov disp(%rbp), %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == regs[op->dest.reg].offset) {

				/* drap: mov disp(%rbp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == cfa->base &&
				   op->src.offset == regs[op->dest.reg].offset + cfa->offset) {

				/* mov disp(%rbp), %reg */
				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == CFI_SP &&
				   op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {

				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);
			}

			break;

		default:
			WARN_INSN(insn, "unknown stack-related instruction");
			return -1;
		}

		break;

	case OP_DEST_PUSH:
	case OP_DEST_PUSHF:
		cfi->stack_size += 8;
		if (cfa->base == CFI_SP)
			cfa->offset += 8;

		if (op->src.type != OP_SRC_REG)
			break;

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: push %drap */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = -cfi->stack_size;

				/* save drap so we know when to restore it */
				cfi->drap_offset = -cfi->stack_size;

			} else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {

				/* drap: push %rbp */
				cfi->stack_size = 0;

			} else {

				/* drap: push %reg */
				save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
			}

		} else {

			/* push %reg */
			save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
		}

		/* detect when asm code uses rbp as a scratch register */
		if (opts.stackval && insn_func(insn) && op->src.reg == CFI_BP &&
		    cfa->base != CFI_BP)
			cfi->bp_scratch = true;
		break;

	case OP_DEST_REG_INDIRECT:

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: mov %drap, disp(%rbp) */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = op->dest.offset;

				/* save drap offset so we know when to restore it */
				cfi->drap_offset = op->dest.offset;
			} else {

				/* drap: mov reg, disp(%rbp) */
				save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
			}

		} else if (op->dest.reg == cfa->base) {

			/* mov reg, disp(%rbp) */
			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->cfa.offset);

		} else if (op->dest.reg == CFI_SP) {

			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->stack_size);

		} else if (op->src.reg == CFI_SP && op->dest.offset == 0) {

			/* mov %rsp, (%reg); # setup a stack swizzle. */
			cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
			cfi->vals[op->dest.reg].offset = cfa->offset;
		}

		break;

	case OP_DEST_MEM:
		if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
			WARN_INSN(insn, "unknown stack-related memory operation");
			return -1;
		}

		/* pop mem */
		cfi->stack_size -= 8;
		if (cfa->base == CFI_SP)
			cfa->offset -= 8;

		break;

	default:
		WARN_INSN(insn, "unknown stack-related instruction");
		return -1;
	}

	return 0;
}
3245
3246
/*
 * The stack layouts of alternatives instructions can sometimes diverge when
 * they have stack modifications. That's fine as long as the potential stack
 * layouts don't conflict at any given potential instruction boundary.
 *
 * Flatten the CFIs of the different alternative code streams (both original
 * and replacement) into a single shared CFI array which can be used to detect
 * conflicts and nicely feed a linear array of ORC entries to the unwinder.
 */
static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
{
	struct cfi_state **alt_cfi;
	int group_off;

	if (!insn->alt_group)
		return 0;

	if (!insn->cfi) {
		WARN("CFI missing");
		return -1;
	}

	/* Index the shared array by the offset within the alt group. */
	alt_cfi = insn->alt_group->cfi;
	group_off = insn->offset - insn->alt_group->first_insn->offset;

	if (!alt_cfi[group_off]) {
		/* First stream to reach this offset claims the slot. */
		alt_cfi[group_off] = insn->cfi;
	} else {
		if (cficmp(alt_cfi[group_off], insn->cfi)) {
			struct alt_group *orig_group = insn->alt_group->orig_group ?: insn->alt_group;
			struct instruction *orig = orig_group->first_insn;
			WARN_INSN(orig, "stack layout conflict in alternatives: %s",
				  offstr(insn->sec, insn->offset));
			return -1;
		}
	}

	return 0;
}
3285
3286
/*
 * Apply all of @insn's stack operations to the branch state, and -- when
 * uaccess checking is enabled inside alternatives -- maintain the PUSHF/POPF
 * shadow stack of uaccess (AC flag) states.
 *
 * The uaccess shadow stack packs one bit per PUSHF into an integer with a
 * sentinel 1 bit at the bottom; bit 31 reached means the stack is exhausted.
 *
 * Returns 0 on success, non-zero on an unrecoverable state error.
 */
static int noinline handle_insn_ops(struct instruction *insn,
				    struct instruction *next_insn,
				    struct insn_state *state)
{
	struct insn_state prev_state __maybe_unused = *state;
	struct stack_op *op;
	int ret = 0;

	for (op = insn->stack_ops; op; op = op->next) {

		ret = update_cfi_state(insn, next_insn, &state->cfi, op);
		if (ret)
			goto done;

		/* uaccess flag tracking only matters inside alternatives. */
		if (!opts.uaccess || !insn->alt_group)
			continue;

		if (op->dest.type == OP_DEST_PUSHF) {
			if (!state->uaccess_stack) {
				/* Seed the sentinel bit on first use. */
				state->uaccess_stack = 1;
			} else if (state->uaccess_stack >> 31) {
				WARN_INSN(insn, "PUSHF stack exhausted");
				ret = 1;
				goto done;
			}
			state->uaccess_stack <<= 1;
			state->uaccess_stack |= state->uaccess;
		}

		if (op->src.type == OP_SRC_POPF) {
			if (state->uaccess_stack) {
				state->uaccess = state->uaccess_stack & 1;
				state->uaccess_stack >>= 1;
				/* Only the sentinel left: stack is empty again. */
				if (state->uaccess_stack == 1)
					state->uaccess_stack = 0;
			}
		}
	}

done:
	TRACE_INSN_STATE(insn, &prev_state, state);

	return ret;
}
3330
3331
/*
 * Compare the CFI recorded on @insn (from an earlier visit) against the CFI
 * @cfi2 computed along the current branch, warning about any mismatch in CFA,
 * register save slots, unwind hint type, or DRAP state.
 *
 * Returns true if the states match; false (with a warning) otherwise.
 */
static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
{
	struct cfi_state *cfi1 = insn->cfi;
	int i;

	if (!cfi1) {
		WARN("CFI missing");
		return false;
	}

	if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {

		WARN_INSN(insn, "stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
			  cfi1->cfa.base, cfi1->cfa.offset,
			  cfi2->cfa.base, cfi2->cfa.offset);
		return false;

	}

	if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
		/* Report each individual register that differs. */
		for (i = 0; i < CFI_NUM_REGS; i++) {

			if (!memcmp(&cfi1->regs[i], &cfi2->regs[i], sizeof(struct cfi_reg)))
				continue;

			WARN_INSN(insn, "stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
				  i, cfi1->regs[i].base, cfi1->regs[i].offset,
				  i, cfi2->regs[i].base, cfi2->regs[i].offset);
		}
		return false;
	}

	if (cfi1->type != cfi2->type) {

		WARN_INSN(insn, "stack state mismatch: type1=%d type2=%d",
			  cfi1->type, cfi2->type);
		return false;
	}

	/* drap_reg/drap_offset only matter while DRAP is active. */
	if (cfi1->drap != cfi2->drap ||
	    (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
	    (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {

		WARN_INSN(insn, "stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
			  cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
			  cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);
		return false;
	}

	return true;
}
3382
3383
static inline bool func_uaccess_safe(struct symbol *func)
3384
{
3385
if (func)
3386
return func->uaccess_safe;
3387
3388
return false;
3389
}
3390
3391
/*
 * Human-readable name of a call's destination for diagnostics: the target
 * symbol name, a synthesized "pv_ops[N]" string for paravirt calls, or
 * "{dynamic}" for unresolvable indirect calls.
 *
 * NOTE: returns a pointer into a static buffer in the pv_ops case, so the
 * result is only valid until the next call (fine for one-shot warnings).
 */
static inline const char *call_dest_name(struct instruction *insn)
{
	/* Big enough for "pv_ops[" + any int + "]" + NUL. */
	static char pvname[19];
	struct reloc *reloc;
	int idx;

	if (insn_call_dest(insn))
		return insn_call_dest(insn)->name;

	/* NOTE(review): file is passed as NULL here, unlike pv_call_dest(). */
	reloc = insn_reloc(NULL, insn);
	if (reloc && !strcmp(reloc->sym->name, "pv_ops")) {
		idx = (reloc_addend(reloc) / sizeof(void *));
		snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx);
		return pvname;
	}

	return "{dynamic}";
}
3409
3410
/*
 * For an indirect call through the pv_ops[] table, check whether every
 * possible target of that slot lives in a noinstr section.  The per-slot
 * result is cached via the 'clean' flag so each slot is only audited once;
 * offending targets are reported with a warning.
 */
static bool pv_call_dest(struct objtool_file *file, struct instruction *insn)
{
	struct symbol *target;
	struct reloc *reloc;
	int idx;

	reloc = insn_reloc(file, insn);
	if (!reloc || strcmp(reloc->sym->name, "pv_ops"))
		return false;

	idx = arch_insn_adjusted_addend(insn, reloc) / sizeof(void *);

	/* Already audited (and found clean) on a previous call. */
	if (file->pv_ops[idx].clean)
		return true;

	/* Assume clean, then veto on any non-noinstr target. */
	file->pv_ops[idx].clean = true;

	list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) {
		if (!target->sec->noinstr) {
			WARN("pv_ops[%d]: %s", idx, target->name);
			file->pv_ops[idx].clean = false;
		}
	}

	return file->pv_ops[idx].clean;
}
3436
3437
static inline bool noinstr_call_dest(struct objtool_file *file,
3438
struct instruction *insn,
3439
struct symbol *func)
3440
{
3441
/*
3442
* We can't deal with indirect function calls at present;
3443
* assume they're instrumented.
3444
*/
3445
if (!func) {
3446
if (file->pv_ops)
3447
return pv_call_dest(file, insn);
3448
3449
return false;
3450
}
3451
3452
/*
3453
* If the symbol is from a noinstr section; we good.
3454
*/
3455
if (func->sec->noinstr)
3456
return true;
3457
3458
/*
3459
* If the symbol is a static_call trampoline, we can't tell.
3460
*/
3461
if (func->static_call_tramp)
3462
return true;
3463
3464
/*
3465
* The __ubsan_handle_*() calls are like WARN(), they only happen when
3466
* something 'BAD' happened. At the risk of taking the machine down,
3467
* let them proceed to get the message out.
3468
*/
3469
if (!strncmp(func->name, "__ubsan_handle_", 15))
3470
return true;
3471
3472
return false;
3473
}
3474
3475
/*
 * Validate a call instruction against the current branch state: noinstr
 * containment, uaccess (AC flag) safety, and DF not being set across calls.
 *
 * Returns 0 if the call is acceptable, 1 (with a warning) otherwise.
 */
static int validate_call(struct objtool_file *file,
			 struct instruction *insn,
			 struct insn_state *state)
{
	/* In noinstr context with no BEGIN/END instrumentation markers active. */
	if (state->noinstr && state->instr <= 0 &&
	    !noinstr_call_dest(file, insn, insn_call_dest(insn))) {
		WARN_INSN(insn, "call to %s() leaves .noinstr.text section", call_dest_name(insn));
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(insn_call_dest(insn))) {
		WARN_INSN(insn, "call to %s() with UACCESS enabled", call_dest_name(insn));
		return 1;
	}

	if (state->df) {
		WARN_INSN(insn, "call to %s() with DF set", call_dest_name(insn));
		return 1;
	}

	return 0;
}
3497
3498
/*
 * A sibling (tail) call must leave the stack frame exactly as it was at
 * function entry, and is otherwise subject to the same checks as a call.
 */
static int validate_sibling_call(struct objtool_file *file,
				 struct instruction *insn,
				 struct insn_state *state)
{
	if (insn_func(insn) && has_modified_stack_frame(insn, state)) {
		WARN_INSN(insn, "sibling call from callable instruction with modified stack frame");
		return 1;
	}

	return validate_call(file, insn, state);
}
3509
3510
/*
 * Validate a return instruction: instrumentation balance in noinstr code,
 * uaccess state consistency with the function's uaccess_safe marking, DF
 * cleared, stack frame fully unwound, and BP not left clobbered.
 *
 * Returns 0 if the return is acceptable, 1 (with a warning) otherwise.
 */
static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
{
	if (state->noinstr && state->instr > 0) {
		WARN_INSN(insn, "return with instrumentation enabled");
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(func)) {
		WARN_INSN(insn, "return with UACCESS enabled");
		return 1;
	}

	/* A uaccess-safe function must re-enable AC before returning. */
	if (!state->uaccess && func_uaccess_safe(func)) {
		WARN_INSN(insn, "return with UACCESS disabled from a UACCESS-safe function");
		return 1;
	}

	if (state->df) {
		WARN_INSN(insn, "return with DF set");
		return 1;
	}

	if (func && has_modified_stack_frame(insn, state)) {
		WARN_INSN(insn, "return with modified stack frame");
		return 1;
	}

	if (state->cfi.bp_scratch) {
		WARN_INSN(insn, "BP used as a scratch register");
		return 1;
	}

	return 0;
}
3544
3545
/*
 * Successor of @insn for branch validation purposes, accounting for
 * alternatives patched in place (see the diagram below).
 */
static struct instruction *next_insn_to_validate(struct objtool_file *file,
						 struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	/*
	 * Simulate the fact that alternatives are patched in-place. When the
	 * end of a replacement alt_group is reached, redirect objtool flow to
	 * the end of the original alt_group.
	 *
	 * insn->alts->insn -> alt_group->first_insn
	 *		       ...
	 *		       alt_group->last_insn
	 *		       [alt_group->nop] -> next(orig_group->last_insn)
	 */
	if (alt_group) {
		if (alt_group->nop) {
			/* ->nop implies ->orig_group */
			if (insn == alt_group->last_insn)
				return alt_group->nop;
			if (insn == alt_group->nop)
				goto next_orig;
		}
		if (insn == alt_group->last_insn && alt_group->orig_group)
			goto next_orig;
	}

	/* Plain fall-through within the section. */
	return next_insn_same_sec(file, insn);

next_orig:
	return next_insn_same_sec(file, alt_group->orig_group->last_insn);
}
3577
3578
/*
 * Decide whether validation should skip the original instruction stream of an
 * alternative group: either it is explicitly annotated as ignored, or it is a
 * NOP whose replacement is CLAC/STAC (where following both streams would
 * produce impossible AC-flag combinations).
 */
static bool skip_alt_group(struct instruction *insn)
{
	struct instruction *alt_insn = insn->alts ? insn->alts->insn : NULL;

	if (!insn->alt_group)
		return false;

	/* ANNOTATE_IGNORE_ALTERNATIVE */
	if (insn->alt_group->ignore) {
		TRACE_ALT(insn, "alt group ignored");
		return true;
	}

	/*
	 * For NOP patched with CLAC/STAC, only follow the latter to avoid
	 * impossible code paths combining patched CLAC with unpatched STAC
	 * or vice versa.
	 *
	 * ANNOTATE_IGNORE_ALTERNATIVE could have been used here, but Linus
	 * requested not to do that to avoid hurting .s file readability
	 * around CLAC/STAC alternative sites.
	 */

	if (!alt_insn)
		return false;

	/* Don't override ASM_{CLAC,STAC}_UNSAFE */
	if (alt_insn->alt_group && alt_insn->alt_group->ignore)
		return false;

	return alt_insn->type == INSN_CLAC || alt_insn->type == INSN_STAC;
}
3610
3611
/*
 * Parse the --debug-checksum option: a comma-separated list of function names
 * whose checksum computation should be traced.  Unknown names only warn; the
 * option itself never fails the run.
 *
 * Returns 0 on success (including no option given), -1 on allocation failure.
 */
static int checksum_debug_init(struct objtool_file *file)
{
	char *dup, *s;

	if (!opts.debug_checksum)
		return 0;

	/* Work on a copy; the option string is split in place below. */
	dup = strdup(opts.debug_checksum);
	if (!dup) {
		ERROR_GLIBC("strdup");
		return -1;
	}

	s = dup;
	while (*s) {
		struct symbol *func;
		char *comma;

		/* Terminate the current token at the next comma, if any. */
		comma = strchr(s, ',');
		if (comma)
			*comma = '\0';

		func = find_symbol_by_name(file->elf, s);
		if (!func || !is_func_sym(func))
			WARN("--debug-checksum: can't find '%s'", s);
		else
			func->debug_checksum = 1;

		if (!comma)
			break;

		s = comma + 1;
	}

	free(dup);
	return 0;
}
3648
3649
/*
 * Fold one instruction into @func's checksum: the raw instruction bytes plus,
 * for relocated operands, a relocation-independent representation (target
 * symbol name and offset, or the referenced string contents) so the checksum
 * is stable across layout changes.
 */
static void checksum_update_insn(struct objtool_file *file, struct symbol *func,
				 struct instruction *insn)
{
	struct reloc *reloc = insn_reloc(file, insn);
	unsigned long offset;
	struct symbol *sym;

	/* Synthetic instructions have no bytes to hash. */
	if (insn->fake)
		return;

	checksum_update(func, insn, insn->sec->data->d_buf + insn->offset, insn->len);

	if (!reloc) {
		struct symbol *call_dest = insn_call_dest(insn);

		/* Direct call: hash the destination's name instead. */
		if (call_dest)
			checksum_update(func, insn, call_dest->demangled_name,
					strlen(call_dest->demangled_name));
		return;
	}

	sym = reloc->sym;
	offset = arch_insn_adjusted_addend(insn, reloc);

	/* String references: hash the string contents themselves. */
	if (is_string_sec(sym->sec)) {
		char *str;

		str = sym->sec->data->d_buf + sym->offset + offset;
		checksum_update(func, insn, str, strlen(str));
		return;
	}

	/* Section symbol: resolve to the named symbol containing the target. */
	if (is_sec_sym(sym)) {
		sym = find_symbol_containing(reloc->sym->sec, offset);
		if (!sym)
			return;

		offset -= sym->offset;
	}

	checksum_update(func, insn, sym->demangled_name, strlen(sym->demangled_name));
	checksum_update(func, insn, &offset, sizeof(offset));
}
3692
3693
static int validate_branch(struct objtool_file *file, struct symbol *func,
3694
struct instruction *insn, struct insn_state state);
3695
static int do_validate_branch(struct objtool_file *file, struct symbol *func,
3696
struct instruction *insn, struct insn_state state);
3697
3698
/*
 * Validate a single instruction on behalf of do_validate_branch(): track CFI
 * state, recurse into alternatives and jump targets, and enforce per-insn
 * rules (frame pointer, UACCESS/DF pairing, etc).
 *
 * Returns non-zero on a reported warning.  *@dead_end tells the caller
 * whether the straight-line walk should stop after this instruction; note it
 * is initialized to true so that every early return terminates the walk.
 */
static int validate_insn(struct objtool_file *file, struct symbol *func,
			 struct instruction *insn, struct insn_state *statep,
			 struct instruction *prev_insn, struct instruction *next_insn,
			 bool *dead_end)
{
	char *alt_name __maybe_unused = NULL;
	struct alternative *alt;
	u8 visited;
	int ret;

	/*
	 * Any returns before the end of this function are effectively dead
	 * ends, i.e. validate_branch() has reached the end of the branch.
	 */
	*dead_end = true;

	/* Separate "visited" bits per uaccess state (0 or 1). */
	visited = VISITED_BRANCH << statep->uaccess;
	if (insn->visited & VISITED_BRANCH_MASK) {
		/* Re-visit: the recorded CFI must match the incoming state. */
		if (!insn->hint && !insn_cfi_match(insn, &statep->cfi))
			return 1;

		if (insn->visited & visited) {
			TRACE_INSN(insn, "already visited");
			return 0;
		}
	} else {
		nr_insns_visited++;
	}

	if (statep->noinstr)
		statep->instr += insn->instr;

	if (insn->hint) {
		if (insn->restore) {
			struct instruction *save_insn, *i;

			/* Find the most recent CFI save hint before us. */
			i = insn;
			save_insn = NULL;

			sym_for_each_insn_continue_reverse(file, func, i) {
				if (i->save) {
					save_insn = i;
					break;
				}
			}

			if (!save_insn) {
				WARN_INSN(insn, "no corresponding CFI save for CFI restore");
				return 1;
			}

			if (!save_insn->visited) {
				/*
				 * If the restore hint insn is at the
				 * beginning of a basic block and was
				 * branched to from elsewhere, and the
				 * save insn hasn't been visited yet,
				 * defer following this branch for now.
				 * It will be seen later via the
				 * straight-line path.
				 */
				if (!prev_insn) {
					TRACE_INSN(insn, "defer restore");
					return 0;
				}

				WARN_INSN(insn, "objtool isn't smart enough to handle this CFI save/restore combo");
				return 1;
			}

			insn->cfi = save_insn->cfi;
			nr_cfi_reused++;
		}

		/* Hints override the computed state. */
		statep->cfi = *insn->cfi;
	} else {
		/* XXX track if we actually changed statep->cfi */

		/* Share the previous insn's CFI object when unchanged. */
		if (prev_insn && !cficmp(prev_insn->cfi, &statep->cfi)) {
			insn->cfi = prev_insn->cfi;
			nr_cfi_reused++;
		} else {
			insn->cfi = cfi_hash_find_or_add(&statep->cfi);
		}
	}

	insn->visited |= visited;

	if (propagate_alt_cfi(file, insn))
		return 1;

	/* Validate every alternative replacement path from here. */
	if (insn->alts) {
		for (alt = insn->alts; alt; alt = alt->next) {
			TRACE_ALT_BEGIN(insn, alt, alt_name);
			ret = validate_branch(file, func, alt->insn, *statep);
			TRACE_ALT_END(insn, alt, alt_name);
			if (ret) {
				BT_INSN(insn, "(alt)");
				return ret;
			}
		}
		TRACE_ALT_INFO_NOADDR(insn, "/ ", "DEFAULT");
	}

	if (skip_alt_group(insn))
		return 0;

	if (handle_insn_ops(insn, next_insn, statep))
		return 1;

	switch (insn->type) {

	case INSN_RETURN:
		TRACE_INSN(insn, "return");
		return validate_return(func, insn, statep);

	case INSN_CALL:
	case INSN_CALL_DYNAMIC:
		if (insn->type == INSN_CALL)
			TRACE_INSN(insn, "call");
		else
			TRACE_INSN(insn, "indirect call");

		ret = validate_call(file, insn, statep);
		if (ret)
			return ret;

		/* With frame pointers, calls require a completed frame. */
		if (opts.stackval && func && !is_special_call(insn) &&
		    !has_valid_stack_frame(statep)) {
			WARN_INSN(insn, "call without frame pointer save/setup");
			return 1;
		}

		break;

	case INSN_JUMP_CONDITIONAL:
	case INSN_JUMP_UNCONDITIONAL:
		if (is_sibling_call(insn)) {
			TRACE_INSN(insn, "sibling call");
			ret = validate_sibling_call(file, insn, statep);
			if (ret)
				return ret;

		} else if (insn->jump_dest) {
			if (insn->type == INSN_JUMP_UNCONDITIONAL)
				TRACE_INSN(insn, "unconditional jump");
			else
				TRACE_INSN(insn, "jump taken");

			/* Recurse into the taken branch with a state copy. */
			ret = validate_branch(file, func, insn->jump_dest, *statep);
			if (ret) {
				BT_INSN(insn, "(branch)");
				return ret;
			}
		}

		if (insn->type == INSN_JUMP_UNCONDITIONAL)
			return 0;

		TRACE_INSN(insn, "jump not taken");
		break;

	case INSN_JUMP_DYNAMIC:
	case INSN_JUMP_DYNAMIC_CONDITIONAL:
		TRACE_INSN(insn, "indirect jump");
		if (is_sibling_call(insn)) {
			ret = validate_sibling_call(file, insn, statep);
			if (ret)
				return ret;
		}

		if (insn->type == INSN_JUMP_DYNAMIC)
			return 0;

		break;

	case INSN_SYSCALL:
		TRACE_INSN(insn, "syscall");
		/* Only allowed in a function when followed by a hint. */
		if (func && (!next_insn || !next_insn->hint)) {
			WARN_INSN(insn, "unsupported instruction in callable function");
			return 1;
		}

		break;

	case INSN_SYSRET:
		TRACE_INSN(insn, "sysret");
		if (func && (!next_insn || !next_insn->hint)) {
			WARN_INSN(insn, "unsupported instruction in callable function");
			return 1;
		}

		return 0;

	case INSN_STAC:
		TRACE_INSN(insn, "stac");
		if (!opts.uaccess)
			break;

		if (statep->uaccess) {
			WARN_INSN(insn, "recursive UACCESS enable");
			return 1;
		}

		statep->uaccess = true;
		break;

	case INSN_CLAC:
		TRACE_INSN(insn, "clac");
		if (!opts.uaccess)
			break;

		if (!statep->uaccess && func) {
			WARN_INSN(insn, "redundant UACCESS disable");
			return 1;
		}

		if (func_uaccess_safe(func) && !statep->uaccess_stack) {
			WARN_INSN(insn, "UACCESS-safe disables UACCESS");
			return 1;
		}

		statep->uaccess = false;
		break;

	case INSN_STD:
		TRACE_INSN(insn, "std");
		if (statep->df) {
			WARN_INSN(insn, "recursive STD");
			return 1;
		}

		statep->df = true;
		break;

	case INSN_CLD:
		TRACE_INSN(insn, "cld");
		if (!statep->df && func) {
			WARN_INSN(insn, "redundant CLD");
			return 1;
		}

		statep->df = false;
		break;

	default:
		break;
	}

	if (insn->dead_end)
		TRACE_INSN(insn, "dead end");

	/* Only a clean fallthrough reaches here; report the insn's own flag. */
	*dead_end = insn->dead_end;
	return 0;
}
3953
3954
/*
3955
* Follow the branch starting at the given instruction, and recursively follow
3956
* any other branches (jumps). Meanwhile, track the frame pointer state at
3957
* each instruction and validate all the rules described in
3958
* tools/objtool/Documentation/objtool.txt.
3959
*/
3960
static int do_validate_branch(struct objtool_file *file, struct symbol *func,
			      struct instruction *insn, struct insn_state state)
{
	struct instruction *next_insn, *prev_insn = NULL;
	bool dead_end;
	int ret;

	if (func && func->ignore)
		return 0;

	/*
	 * Straight-line walk; validate_insn() handles recursion into
	 * branches/alternatives.  dead_end is set by validate_insn() on
	 * every early return, so a warning also terminates this loop and
	 * ret is propagated.
	 */
	do {
		insn->trace = 0;
		next_insn = next_insn_to_validate(file, insn);

		if (opts.checksum && func && insn->sec)
			checksum_update_insn(file, func, insn);

		/* Walked past the end of @func into another function? */
		if (func && insn_func(insn) && func != insn_func(insn)->pfunc) {
			/* Ignore KCFI type preambles, which always fall through */
			if (is_prefix_func(func))
				return 0;

			if (file->ignore_unreachables)
				return 0;

			WARN("%s() falls through to next function %s()",
			     func->name, insn_func(insn)->name);
			func->warned = 1;

			return 1;
		}

		ret = validate_insn(file, func, insn, &state, prev_insn, next_insn,
				    &dead_end);

		if (!insn->trace) {
			if (ret)
				TRACE_INSN(insn, "warning (%d)", ret);
			else
				TRACE_INSN(insn, NULL);
		}

		/* Falling off the end of the section is only OK if undefined CFI. */
		if (!dead_end && !next_insn) {
			if (state.cfi.cfa.base == CFI_UNDEFINED)
				return 0;
			if (file->ignore_unreachables)
				return 0;

			WARN("%s%sunexpected end of section %s",
			     func ? func->name : "", func ? "(): " : "",
			     insn->sec->name);
			return 1;
		}

		prev_insn = insn;
		insn = next_insn;

	} while (!dead_end);

	return ret;
}
4021
4022
static int validate_branch(struct objtool_file *file, struct symbol *func,
4023
struct instruction *insn, struct insn_state state)
4024
{
4025
int ret;
4026
4027
trace_depth_inc();
4028
ret = do_validate_branch(file, func, insn, state);
4029
trace_depth_dec();
4030
4031
return ret;
4032
}
4033
4034
/*
 * If @insn carries an unwind hint and hasn't already been covered by the
 * function walk, start a fresh branch validation from it.  Returns the
 * warning count from that walk.
 */
static int validate_unwind_hint(struct objtool_file *file,
				struct instruction *insn,
				struct insn_state *state)
{
	if (insn->hint && !insn->visited) {
		struct symbol *func = insn_func(insn);
		int ret;

		/* Hint-rooted walks contribute to the checksum too. */
		if (opts.checksum)
			checksum_init(func);

		ret = validate_branch(file, func, insn, *state);
		if (ret)
			BT_INSN(insn, "<=== (hint)");
		return ret;
	}

	return 0;
}
4053
4054
/*
 * Validate every unvisited hinted instruction, either within @sec or (when
 * @sec is NULL) across the whole file.  Returns the accumulated warning
 * count.
 */
static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
{
	struct instruction *insn;
	struct insn_state state;
	int warnings = 0;

	/* Nothing to do for objects without unwind hints. */
	if (!file->hints)
		return 0;

	init_insn_state(file, &state, sec);

	if (sec) {
		sec_for_each_insn(file, sec, insn)
			warnings += validate_unwind_hint(file, insn, &state);
	} else {
		for_each_insn(file, insn)
			warnings += validate_unwind_hint(file, insn, &state);
	}

	return warnings;
}
4075
4076
/*
4077
* Validate rethunk entry constraint: must untrain RET before the first RET.
4078
*
4079
* Follow every branch (intra-function) and ensure VALIDATE_UNRET_END comes
4080
* before an actual RET instruction.
4081
*/
4082
static int validate_unret(struct objtool_file *file, struct instruction *insn)
{
	struct instruction *next, *dest;
	int ret;

	/* Iterative straight-line walk; branches/calls recurse. */
	for (;;) {
		next = next_insn_to_validate(file, insn);

		/* Each insn only needs to be checked once per unret pass. */
		if (insn->visited & VISITED_UNRET)
			return 0;

		insn->visited |= VISITED_UNRET;

		if (insn->alts) {
			struct alternative *alt;
			for (alt = insn->alts; alt; alt = alt->next) {
				ret = validate_unret(file, alt->insn);
				if (ret) {
					BT_INSN(insn, "(alt)");
					return ret;
				}
			}
		}

		switch (insn->type) {

		case INSN_CALL_DYNAMIC:
		case INSN_JUMP_DYNAMIC:
		case INSN_JUMP_DYNAMIC_CONDITIONAL:
			/* Indirect branches before UNTRAIN_RET are unsafe. */
			WARN_INSN(insn, "early indirect call");
			return 1;

		case INSN_JUMP_UNCONDITIONAL:
		case INSN_JUMP_CONDITIONAL:
			if (!is_sibling_call(insn)) {
				if (!insn->jump_dest) {
					WARN_INSN(insn, "unresolved jump target after linking?!?");
					return 1;
				}
				ret = validate_unret(file, insn->jump_dest);
				if (ret) {
					BT_INSN(insn, "(branch%s)",
						insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : "");
					return ret;
				}

				if (insn->type == INSN_JUMP_UNCONDITIONAL)
					return 0;

				break;
			}

			/* Sibling calls are handled like calls: */
			/* fallthrough */
		case INSN_CALL:
			dest = find_insn(file, insn_call_dest(insn)->sec,
					 insn_call_dest(insn)->offset);
			if (!dest) {
				WARN("Unresolved function after linking!?: %s",
				     insn_call_dest(insn)->name);
				return 1;
			}

			ret = validate_unret(file, dest);
			if (ret) {
				BT_INSN(insn, "(call)");
				return ret;
			}
			/*
			 * If a call returns without error, it must have seen UNTRAIN_RET.
			 * Therefore any non-error return is a success.
			 */
			return 0;

		case INSN_RETURN:
			WARN_INSN(insn, "RET before UNTRAIN");
			return 1;

		case INSN_SYSCALL:
			break;

		case INSN_SYSRET:
			return 0;

		case INSN_NOP:
			/* A retpoline-safe NOP marks the UNTRAIN_RET point. */
			if (insn->retpoline_safe)
				return 0;
			break;

		default:
			break;
		}

		if (insn->dead_end)
			return 0;

		if (!next) {
			WARN_INSN(insn, "teh end!");
			return 1;
		}
		insn = next;
	}

	return 0;
}
4186
4187
/*
4188
* Validate that all branches starting at VALIDATE_UNRET_BEGIN encounter
4189
* VALIDATE_UNRET_END before RET.
4190
*/
4191
static int validate_unrets(struct objtool_file *file)
4192
{
4193
struct instruction *insn;
4194
int warnings = 0;
4195
4196
for_each_insn(file, insn) {
4197
if (!insn->unret)
4198
continue;
4199
4200
warnings += validate_unret(file, insn);
4201
}
4202
4203
return warnings;
4204
}
4205
4206
/*
 * Flag naked indirect branches in a RETPOLINE build, naked returns in a
 * RETHUNK build, and (with --cfi) indirect calls lacking a kCFI check.
 * Returns the warning count.
 */
static int validate_retpoline(struct objtool_file *file)
{
	struct instruction *insn;
	int warnings = 0;

	for_each_insn(file, insn) {
		if (insn->type != INSN_JUMP_DYNAMIC &&
		    insn->type != INSN_CALL_DYNAMIC &&
		    insn->type != INSN_RETURN)
			continue;

		/* Explicitly annotated as safe. */
		if (insn->retpoline_safe)
			continue;

		/* .init code runs before userspace; spectre doesn't apply. */
		if (insn->sec->init)
			continue;

		if (insn->type == INSN_RETURN) {
			if (opts.rethunk) {
				WARN_INSN(insn, "'naked' return found in MITIGATION_RETHUNK build");
				warnings++;
			}
			continue;
		}

		WARN_INSN(insn, "indirect %s found in MITIGATION_RETPOLINE build",
			  insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
		warnings++;
	}

	if (!opts.cfi)
		return warnings;

	/*
	 * kCFI call sites look like:
	 *
	 *     movl $(-0x12345678), %r10d
	 *     addl -4(%r11), %r10d
	 *     jz 1f
	 *     ud2
	 *     1: cs call __x86_indirect_thunk_r11
	 *
	 * Verify all indirect calls are kCFI adorned by checking for the
	 * UD2. Notably, doing __nocfi calls to regular (cfi) functions is
	 * broken.
	 */
	list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
		struct symbol *sym = insn->sym;

		if (sym && (sym->type == STT_NOTYPE ||
			    sym->type == STT_FUNC) && !sym->nocfi) {
			struct instruction *prev =
				prev_insn_same_sym(file, insn);

			/* The insn before a kCFI call site must be the UD2. */
			if (!prev || prev->type != INSN_BUG) {
				WARN_INSN(insn, "no-cfi indirect call!");
				warnings++;
			}
		}
	}

	return warnings;
}
4269
4270
static bool is_kasan_insn(struct instruction *insn)
4271
{
4272
return (insn->type == INSN_CALL &&
4273
!strcmp(insn_call_dest(insn)->name, "__asan_handle_no_return"));
4274
}
4275
4276
static bool is_ubsan_insn(struct instruction *insn)
4277
{
4278
return (insn->type == INSN_CALL &&
4279
!strcmp(insn_call_dest(insn)->name,
4280
"__ubsan_handle_builtin_unreachable"));
4281
}
4282
4283
/*
 * Decide whether an unvisited instruction is a known-benign form of
 * unreachable code (padding, alternatives, sanitizer artifacts, ...) that
 * should not be warned about.
 */
static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
{
	struct symbol *func = insn_func(insn);
	struct instruction *prev_insn;
	int i;

	if (insn->type == INSN_NOP || insn->type == INSN_TRAP ||
	    insn->hole || (func && func->ignore))
		return true;

	/*
	 * Ignore alternative replacement instructions. This can happen
	 * when a whitelisted function uses one of the ALTERNATIVE macros.
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement") ||
	    !strcmp(insn->sec->name, ".altinstr_aux"))
		return true;

	if (!func)
		return false;

	if (func->static_call_tramp)
		return true;

	/*
	 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
	 * __builtin_unreachable(). The BUG() macro has an unreachable() after
	 * the UD2, which causes GCC's undefined trap logic to emit another UD2
	 * (or occasionally a JMP to UD2).
	 *
	 * It may also insert a UD2 after calling a __noreturn function.
	 */
	prev_insn = prev_insn_same_sec(file, insn);
	if (prev_insn && prev_insn->dead_end &&
	    (insn->type == INSN_BUG ||
	     (insn->type == INSN_JUMP_UNCONDITIONAL &&
	      insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
		return true;

	/*
	 * Check if this (or a subsequent) instruction is related to
	 * CONFIG_UBSAN or CONFIG_KASAN.
	 *
	 * End the search at 5 instructions to avoid going into the weeds.
	 */
	for (i = 0; i < 5; i++) {

		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
			return true;

		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
			/* Only follow jumps that stay within @func. */
			if (insn->jump_dest &&
			    insn_func(insn->jump_dest) == func) {
				insn = insn->jump_dest;
				continue;
			}

			break;
		}

		/* Stop at the end of the function. */
		if (insn->offset + insn->len >= func->offset + func->len)
			break;

		insn = next_insn_same_sec(file, insn);
	}

	return false;
}
4351
4352
/*
4353
* For FineIBT or kCFI, a certain number of bytes preceding the function may be
4354
* NOPs. Those NOPs may be rewritten at runtime and executed, so give them a
4355
* proper function name: __pfx_<func>.
4356
*
4357
* The NOPs may not exist for the following cases:
4358
*
4359
* - compiler cloned functions (*.cold, *.part0, etc)
4360
* - asm functions created with inline asm or without SYM_FUNC_START()
4361
*
4362
* Also, the function may already have a prefix from a previous objtool run
4363
* (livepatch extracted functions, or manually running objtool multiple times).
4364
*
4365
* So return 0 if the NOPs are missing or the function already has a prefix
4366
* symbol.
4367
*/
4368
static int create_prefix_symbol(struct objtool_file *file, struct symbol *func)
{
	struct instruction *insn, *prev;
	char name[SYM_NAME_LEN];
	struct cfi_state *cfi;

	/* Only plain functions get a prefix symbol. */
	if (!is_func_sym(func) || is_prefix_func(func) ||
	    func->cold || func->static_call_tramp)
		return 0;

	if ((strlen(func->name) + sizeof("__pfx_") > SYM_NAME_LEN)) {
		WARN("%s: symbol name too long, can't create __pfx_ symbol",
		     func->name);
		return 0;
	}

	if (snprintf_check(name, SYM_NAME_LEN, "__pfx_%s", func->name))
		return -1;

	/* Livepatch objects may already carry the prefix from a prior run. */
	if (file->klp) {
		struct symbol *pfx;

		pfx = find_symbol_by_offset(func->sec, func->offset - opts.prefix);
		if (pfx && is_prefix_func(pfx) && !strcmp(pfx->name, name))
			return 0;
	}

	insn = find_insn(file, func->sec, func->offset);
	if (!insn) {
		WARN("%s: can't find starting instruction", func->name);
		return -1;
	}

	/*
	 * Walk backwards through the NOP padding; the prefix symbol is only
	 * created if exactly opts.prefix bytes of NOPs precede the function.
	 */
	for (prev = prev_insn_same_sec(file, insn);
	     prev;
	     prev = prev_insn_same_sec(file, prev)) {
		u64 offset;

		if (prev->type != INSN_NOP)
			return 0;

		offset = func->offset - prev->offset;

		if (offset > opts.prefix)
			return 0;

		if (offset < opts.prefix)
			continue;

		if (!elf_create_symbol(file->elf, name, func->sec,
				       GELF_ST_BIND(func->sym.st_info),
				       GELF_ST_TYPE(func->sym.st_info),
				       prev->offset, opts.prefix))
			return -1;

		break;
	}

	if (!prev)
		return 0;

	if (!insn->cfi) {
		/*
		 * This can happen if stack validation isn't enabled or the
		 * function is annotated with STACK_FRAME_NON_STANDARD.
		 */
		return 0;
	}

	/* Propagate insn->cfi to the prefix code */
	cfi = cfi_hash_find_or_add(insn->cfi);
	for (; prev != insn; prev = next_insn_same_sec(file, prev))
		prev->cfi = cfi;

	return 0;
}
4444
4445
static int create_prefix_symbols(struct objtool_file *file)
4446
{
4447
struct section *sec;
4448
struct symbol *func;
4449
4450
for_each_sec(file->elf, sec) {
4451
if (!is_text_sec(sec))
4452
continue;
4453
4454
sec_for_each_sym(sec, func) {
4455
if (create_prefix_symbol(file, func))
4456
return -1;
4457
}
4458
}
4459
4460
return 0;
4461
}
4462
4463
/*
 * Validate one function symbol: sanity-check its ELF size, skip aliases and
 * subfunction parts, then run branch validation from its first instruction.
 * Returns the warning count.
 */
static int validate_symbol(struct objtool_file *file, struct section *sec,
			   struct symbol *sym, struct insn_state *state)
{
	struct instruction *insn;
	struct symbol *func;
	int ret;

	if (!sym->len) {
		WARN("%s() is missing an ELF size annotation", sym->name);
		return 1;
	}

	/* Subfunction parts and aliases are validated via their parent. */
	if (sym->pfunc != sym || sym->alias != sym)
		return 0;

	insn = find_insn(file, sec, sym->offset);
	if (!insn || insn->visited)
		return 0;

	if (opts.uaccess)
		state->uaccess = sym->uaccess_safe;

	func = insn_func(insn);

	if (opts.checksum)
		checksum_init(func);

	/* --trace=<pattern>: enable tracing for matching symbols only. */
	if (opts.trace && !fnmatch(opts.trace, sym->name, 0)) {
		trace_enable();
		TRACE("%s: validation begin\n", sym->name);
	}

	ret = validate_branch(file, func, insn, *state);
	if (ret)
		BT_INSN(insn, "<=== (sym)");

	TRACE("%s: validation %s\n\n", sym->name, ret ? "failed" : "end");
	trace_disable();

	if (opts.checksum)
		checksum_finish(func);

	return ret;
}
4507
4508
static int validate_section(struct objtool_file *file, struct section *sec)
4509
{
4510
struct insn_state state;
4511
struct symbol *func;
4512
int warnings = 0;
4513
4514
sec_for_each_sym(sec, func) {
4515
if (!is_func_sym(func))
4516
continue;
4517
4518
init_insn_state(file, &state, sec);
4519
set_func_state(&state.cfi);
4520
4521
warnings += validate_symbol(file, sec, func, &state);
4522
}
4523
4524
return warnings;
4525
}
4526
4527
static int validate_noinstr_sections(struct objtool_file *file)
4528
{
4529
struct section *sec;
4530
int warnings = 0;
4531
4532
sec = find_section_by_name(file->elf, ".noinstr.text");
4533
if (sec) {
4534
warnings += validate_section(file, sec);
4535
warnings += validate_unwind_hints(file, sec);
4536
}
4537
4538
sec = find_section_by_name(file->elf, ".entry.text");
4539
if (sec) {
4540
warnings += validate_section(file, sec);
4541
warnings += validate_unwind_hints(file, sec);
4542
}
4543
4544
sec = find_section_by_name(file->elf, ".cpuidle.text");
4545
if (sec) {
4546
warnings += validate_section(file, sec);
4547
warnings += validate_unwind_hints(file, sec);
4548
}
4549
4550
return warnings;
4551
}
4552
4553
static int validate_functions(struct objtool_file *file)
4554
{
4555
struct section *sec;
4556
int warnings = 0;
4557
4558
for_each_sec(file->elf, sec) {
4559
if (!is_text_sec(sec))
4560
continue;
4561
4562
warnings += validate_section(file, sec);
4563
}
4564
4565
return warnings;
4566
}
4567
4568
/*
 * An ENDBR that is legitimately targeted must not be sealed: take it off
 * the seal list (insns still on a list get NOPed out later).
 */
static void mark_endbr_used(struct instruction *insn)
{
	if (!list_empty(&insn->call_node))
		list_del_init(&insn->call_node);
}
4573
4574
/*
 * Return true if @insn sits exactly one byte past the end of a symbol whose
 * first instruction is ENDBR or ANNOTATE_NOENDBR -- the typical "end of
 * code range" reference pattern, which is not an indirect-branch target.
 */
static bool noendbr_range(struct objtool_file *file, struct instruction *insn)
{
	/* offset-1: look up the symbol this address is the end of. */
	struct symbol *sym = find_symbol_containing(insn->sec, insn->offset-1);
	struct instruction *first;

	if (!sym)
		return false;

	first = find_insn(file, sym->sec, sym->offset);
	if (!first)
		return false;

	if (first->type != INSN_ENDBR && !first->noendbr)
		return false;

	return insn->offset == sym->offset + sym->len;
}
4591
4592
/*
 * @insn references @dest as a potential indirect-branch target; verify the
 * reference is IBT-safe.  Returns 1 (and warns) if @dest is a !ENDBR
 * location with no acceptable excuse.
 */
static int __validate_ibt_insn(struct objtool_file *file, struct instruction *insn,
			       struct instruction *dest)
{
	if (dest->type == INSN_ENDBR) {
		/* Valid target: keep its ENDBR from being sealed. */
		mark_endbr_used(dest);
		return 0;
	}

	if (insn_func(dest) && insn_func(insn) &&
	    insn_func(dest)->pfunc == insn_func(insn)->pfunc) {
		/*
		 * Anything from->to self is either _THIS_IP_ or
		 * IRET-to-self.
		 *
		 * There is no sane way to annotate _THIS_IP_ since the
		 * compiler treats the relocation as a constant and is
		 * happy to fold in offsets, skewing any annotation we
		 * do, leading to vast amounts of false-positives.
		 *
		 * There's also compiler generated _THIS_IP_ through
		 * KCOV and such which we have no hope of annotating.
		 *
		 * As such, blanket accept self-references without
		 * issue.
		 */
		return 0;
	}

	/*
	 * Accept anything ANNOTATE_NOENDBR.
	 */
	if (dest->noendbr)
		return 0;

	/*
	 * Accept if this is the instruction after a symbol
	 * that is (no)endbr -- typical code-range usage.
	 */
	if (noendbr_range(file, dest))
		return 0;

	WARN_INSN(insn, "relocation to !ENDBR: %s", offstr(dest->sec, dest->offset));
	return 1;
}
4636
4637
/*
 * Scan @insn for code references that could become indirect-branch targets
 * and validate each against the IBT rules.  Returns the warning count.
 */
static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn)
{
	struct instruction *dest;
	struct reloc *reloc;
	unsigned long off;
	int warnings = 0;

	/*
	 * Looking for function pointer load relocations. Ignore
	 * direct/indirect branches:
	 */
	switch (insn->type) {

	case INSN_CALL:
	case INSN_CALL_DYNAMIC:
	case INSN_JUMP_CONDITIONAL:
	case INSN_JUMP_UNCONDITIONAL:
	case INSN_JUMP_DYNAMIC:
	case INSN_JUMP_DYNAMIC_CONDITIONAL:
	case INSN_RETURN:
	case INSN_NOP:
		return 0;

	case INSN_LEA_RIP:
		if (!insn_reloc(file, insn)) {
			/* local function pointer reference without reloc */

			off = arch_jump_destination(insn);

			dest = find_insn(file, insn->sec, off);
			if (!dest) {
				WARN_INSN(insn, "corrupt function pointer reference");
				return 1;
			}

			return __validate_ibt_insn(file, insn, dest);
		}
		break;

	default:
		break;
	}

	/* Check every relocation within the instruction's byte range. */
	for (reloc = insn_reloc(file, insn);
	     reloc;
	     reloc = find_reloc_by_dest_range(file->elf, insn->sec,
					      reloc_offset(reloc) + 1,
					      (insn->offset + insn->len) - (reloc_offset(reloc) + 1))) {

		off = reloc->sym->offset + arch_insn_adjusted_addend(insn, reloc);

		/* Non-code targets are not IBT concerns. */
		dest = find_insn(file, reloc->sym->sec, off);
		if (!dest)
			continue;

		warnings += __validate_ibt_insn(file, insn, dest);
	}

	return warnings;
}
4697
4698
static int validate_ibt_data_reloc(struct objtool_file *file,
4699
struct reloc *reloc)
4700
{
4701
struct instruction *dest;
4702
4703
dest = find_insn(file, reloc->sym->sec,
4704
reloc->sym->offset + reloc_addend(reloc));
4705
if (!dest)
4706
return 0;
4707
4708
if (dest->type == INSN_ENDBR) {
4709
mark_endbr_used(dest);
4710
return 0;
4711
}
4712
4713
if (dest->noendbr)
4714
return 0;
4715
4716
WARN_FUNC(reloc->sec->base, reloc_offset(reloc),
4717
"data relocation to !ENDBR: %s", offstr(dest->sec, dest->offset));
4718
4719
return 1;
4720
}
4721
4722
/*
4723
* Validate IBT rules and remove used ENDBR instructions from the seal list.
4724
* Unused ENDBR instructions will be annotated for sealing (i.e., replaced with
4725
* NOPs) later, in create_ibt_endbr_seal_sections().
4726
*/
4727
static int validate_ibt(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;
	int warnings = 0;

	/* First pass: code references, insn by insn. */
	for_each_insn(file, insn)
		warnings += validate_ibt_insn(file, insn);

	/* Second pass: data relocations to text. */
	for_each_sec(file->elf, sec) {

		/* Already done by validate_ibt_insn() */
		if (is_text_sec(sec))
			continue;

		if (!sec->rsec)
			continue;

		/*
		 * These sections can reference text addresses, but not with
		 * the intent to indirect branch to them.
		 */
		if ((!strncmp(sec->name, ".discard", 8) &&
		     strcmp(sec->name, ".discard.ibt_endbr_noseal")) ||
		    !strncmp(sec->name, ".debug", 6) ||
		    !strcmp(sec->name, ".altinstructions") ||
		    !strcmp(sec->name, ".ibt_endbr_seal") ||
		    !strcmp(sec->name, ".kcfi_traps") ||
		    !strcmp(sec->name, ".orc_unwind_ip") ||
		    !strcmp(sec->name, ".retpoline_sites") ||
		    !strcmp(sec->name, ".smp_locks") ||
		    !strcmp(sec->name, ".static_call_sites") ||
		    !strcmp(sec->name, "_error_injection_whitelist") ||
		    !strcmp(sec->name, "_kprobe_blacklist") ||
		    !strcmp(sec->name, "__bug_table") ||
		    !strcmp(sec->name, "__ex_table") ||
		    !strcmp(sec->name, "__jump_table") ||
		    !strcmp(sec->name, ".init.klp_funcs") ||
		    !strcmp(sec->name, "__mcount_loc") ||
		    !strcmp(sec->name, ".llvm.call-graph-profile") ||
		    !strcmp(sec->name, ".llvm_bb_addr_map") ||
		    !strcmp(sec->name, "__tracepoints") ||
		    !strcmp(sec->name, ".return_sites") ||
		    !strcmp(sec->name, ".call_sites") ||
		    !strcmp(sec->name, "__patchable_function_entries"))
			continue;

		for_each_reloc(sec->rsec, reloc)
			warnings += validate_ibt_data_reloc(file, reloc);
	}

	return warnings;
}
4781
4782
static int validate_sls(struct objtool_file *file)
4783
{
4784
struct instruction *insn, *next_insn;
4785
int warnings = 0;
4786
4787
for_each_insn(file, insn) {
4788
next_insn = next_insn_same_sec(file, insn);
4789
4790
if (insn->retpoline_safe)
4791
continue;
4792
4793
switch (insn->type) {
4794
case INSN_RETURN:
4795
if (!next_insn || next_insn->type != INSN_TRAP) {
4796
WARN_INSN(insn, "missing int3 after ret");
4797
warnings++;
4798
}
4799
4800
break;
4801
case INSN_JUMP_DYNAMIC:
4802
if (!next_insn || next_insn->type != INSN_TRAP) {
4803
WARN_INSN(insn, "missing int3 after indirect jump");
4804
warnings++;
4805
}
4806
break;
4807
default:
4808
break;
4809
}
4810
}
4811
4812
return warnings;
4813
}
4814
4815
/*
 * After validation, warn about any instruction that was never reached.
 * Code right after a dead-end call usually means the callee is missing a
 * __noreturn annotation; report that specifically.  Returns warning count.
 */
static int validate_reachable_instructions(struct objtool_file *file)
{
	struct instruction *insn, *prev_insn;
	struct symbol *call_dest;
	int warnings = 0;

	if (file->ignore_unreachables)
		return 0;

	for_each_insn(file, insn) {
		if (insn->visited || ignore_unreachable_insn(file, insn))
			continue;

		prev_insn = prev_insn_same_sec(file, insn);
		if (prev_insn && prev_insn->dead_end) {
			call_dest = insn_call_dest(prev_insn);
			if (call_dest) {
				WARN_INSN(insn, "%s() missing __noreturn in .c/.h or NORETURN() in noreturns.h",
					  call_dest->name);
				warnings++;
				continue;
			}
		}

		WARN_INSN(insn, "unreachable instruction");
		warnings++;
	}

	return warnings;
}
4845
4846
/*
 * Default (arch-overridable) test for an absolute relocation: the
 * pointer-sized ABS type for this ELF class.
 */
__weak bool arch_absolute_reloc(struct elf *elf, struct reloc *reloc)
{
	unsigned int type = reloc_type(reloc);

	if (elf_addr_size(elf) == 8)
		return type == R_ABS64;

	return type == R_ABS32;
}
4853
4854
/*
 * --noabs: warn about absolute relocations in loadable sections (these
 * break position-independent objects).  Returns the warning count.
 */
static int check_abs_references(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	int ret = 0;

	for_each_sec(file->elf, sec) {
		/* absolute references in non-loadable sections are fine */
		if (!(sec->sh.sh_flags & SHF_ALLOC))
			continue;

		/* section must have an associated .rela section */
		if (!sec->rsec)
			continue;

		/*
		 * Special case for compiler generated metadata that is not
		 * consumed until after boot.
		 */
		if (!strcmp(sec->name, "__patchable_function_entries"))
			continue;

		for_each_reloc(sec->rsec, reloc) {
			if (arch_absolute_reloc(file->elf, reloc)) {
				WARN("section %s has absolute relocation at offset 0x%llx",
				     sec->name, (unsigned long long)reloc_offset(reloc));
				ret++;
			}
		}
	}
	return ret;
}
4886
4887
/* Bookkeeping node recording one chunk-sized instruction allocation. */
struct insn_chunk {
	void *addr;		/* first instruction of the chunk */
	struct insn_chunk *next;
};
4891
4892
/*
4893
* Reduce peak RSS usage by freeing insns memory before writing the ELF file,
4894
* which can trigger more allocations for .debug_* sections whose data hasn't
4895
* been read yet.
4896
*/
4897
static void free_insns(struct objtool_file *file)
4898
{
4899
struct instruction *insn;
4900
struct insn_chunk *chunks = NULL, *chunk;
4901
4902
for_each_insn(file, insn) {
4903
if (!insn->idx) {
4904
chunk = malloc(sizeof(*chunk));
4905
chunk->addr = insn;
4906
chunk->next = chunks;
4907
chunks = chunk;
4908
}
4909
}
4910
4911
for (chunk = chunks; chunk; chunk = chunk->next)
4912
free(chunk->addr);
4913
}
4914
4915
const char *objtool_disas_insn(struct instruction *insn)
4916
{
4917
struct disas_context *dctx = objtool_disas_ctx;
4918
4919
if (!dctx)
4920
return "";
4921
4922
disas_insn(dctx, insn);
4923
return disas_result(dctx);
4924
}
4925
4926
/*
 * Main objtool entry point: run all enabled validation passes over @file,
 * then generate the requested annotation sections and write results back.
 *
 * Returns 0 on success, non-zero on error (or on warnings when --werror).
 * Note the pass ordering below is significant: validation passes must run
 * before the section-creation passes, which consume their results.
 */
int check(struct objtool_file *file)
{
	struct disas_context *disas_ctx = NULL;
	int ret = 0, warnings = 0;

	/*
	 * Create a disassembly context if we might disassemble any
	 * instruction or function.
	 */
	if (opts.verbose || opts.backtrace || opts.trace || opts.disas) {
		disas_ctx = disas_context_create(file);
		if (!disas_ctx) {
			/* No disassembler available: degrade gracefully. */
			opts.disas = false;
			opts.trace = false;
		}
		objtool_disas_ctx = disas_ctx;
	}

	/* Set up the shared CFI template states used by all functions. */
	arch_initial_func_cfi_state(&initial_func_cfi);
	init_cfi_state(&init_cfi);
	init_cfi_state(&func_cfi);
	set_func_state(&func_cfi);
	init_cfi_state(&force_undefined_cfi);
	force_undefined_cfi.force_undefined = true;

	/* Hash table sized relative to the symbol count of the ELF file. */
	if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3))) {
		ret = -1;
		goto out;
	}

	cfi_hash_add(&init_cfi);
	cfi_hash_add(&func_cfi);

	ret = checksum_debug_init(file);
	if (ret)
		goto out;

	ret = decode_sections(file);
	if (ret)
		goto out;

	/* Nothing to validate or annotate without instructions. */
	if (!nr_insns)
		goto out;

	/* --- Validation passes: accumulate warnings, don't abort. --- */

	if (opts.retpoline)
		warnings += validate_retpoline(file);

	if (validate_branch_enabled()) {
		int w = 0;

		w += validate_functions(file);
		w += validate_unwind_hints(file, NULL);
		/* Reachability check is only meaningful if the above were clean. */
		if (!w)
			w += validate_reachable_instructions(file);

		warnings += w;

	} else if (opts.noinstr) {
		warnings += validate_noinstr_sections(file);
	}

	if (opts.unret) {
		/*
		 * Must be after validate_branch() and friends, it plays
		 * further games with insn->visited.
		 */
		warnings += validate_unrets(file);
	}

	if (opts.ibt)
		warnings += validate_ibt(file);

	if (opts.sls)
		warnings += validate_sls(file);

	/* --- Section-creation passes: any failure aborts via 'out'. --- */

	if (opts.static_call) {
		ret = create_static_call_sections(file);
		if (ret)
			goto out;
	}

	if (opts.retpoline) {
		ret = create_retpoline_sites_sections(file);
		if (ret)
			goto out;
	}

	if (opts.cfi) {
		ret = create_cfi_sections(file);
		if (ret)
			goto out;
	}

	if (opts.rethunk) {
		ret = create_return_sites_sections(file);
		if (ret)
			goto out;

		if (opts.hack_skylake) {
			ret = create_direct_call_sections(file);
			if (ret)
				goto out;
		}
	}

	if (opts.mcount) {
		ret = create_mcount_loc_sections(file);
		if (ret)
			goto out;
	}

	if (opts.prefix) {
		ret = create_prefix_symbols(file);
		if (ret)
			goto out;
	}

	if (opts.ibt) {
		ret = create_ibt_endbr_seal_sections(file);
		if (ret)
			goto out;
	}

	if (opts.noabs)
		warnings += check_abs_references(file);

	if (opts.checksum) {
		ret = create_sym_checksum_section(file);
		if (ret)
			goto out;
	}

	if (opts.orc && nr_insns) {
		ret = orc_create(file);
		if (ret)
			goto out;
	}

	if (opts.stats) {
		printf("nr_insns_visited: %ld\n", nr_insns_visited);
		printf("nr_cfi: %ld\n", nr_cfi);
		printf("nr_cfi_reused: %ld\n", nr_cfi_reused);
		printf("nr_cfi_cache: %ld\n", nr_cfi_cache);
	}

out:
	if (ret || warnings) {
		/* --werror: warnings alone are enough to fail the run. */
		if (opts.werror && warnings)
			ret = 1;

		if (opts.verbose) {
			if (opts.werror && warnings)
				WARN("%d warning(s) upgraded to errors", warnings);
			disas_warned_funcs(disas_ctx);
		}
	}

	if (opts.disas)
		disas_funcs(disas_ctx);

	if (disas_ctx) {
		disas_context_destroy(disas_ctx);
		objtool_disas_ctx = NULL;
	}

	/* Release instruction memory before ELF write to reduce peak RSS. */
	free_insns(file);

	if (!ret && !warnings)
		return 0;

	/* Preserve the original object for inspection before failing. */
	if (opts.backup && make_backup())
		return 1;

	return ret;
}
5101
5102