GitHub Repository: torvalds/linux
Path: blob/master/tools/objtool/klp-diff.c
// SPDX-License-Identifier: GPL-2.0-or-later
#define _GNU_SOURCE /* memmem() */
#include <subcmd/parse-options.h>
#include <stdlib.h>
#include <string.h>
#include <libgen.h>
#include <stdio.h>
#include <ctype.h>

#include <objtool/objtool.h>
#include <objtool/warn.h>
#include <objtool/arch.h>
#include <objtool/klp.h>
#include <objtool/util.h>
#include <arch/special.h>

#include <linux/objtool_types.h>
#include <linux/livepatch_external.h>
#include <linux/stringify.h>
#include <linux/string.h>
#include <linux/jhash.h>

#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))

struct elfs {
	struct elf *orig, *patched, *out;
	const char *modname;
};

struct export {
	struct hlist_node hash;
	char *mod, *sym;
};

static const char * const klp_diff_usage[] = {
	"objtool klp diff [<options>] <in1.o> <in2.o> <out.o>",
	NULL,
};

static const struct option klp_diff_options[] = {
	OPT_GROUP("Options:"),
	OPT_BOOLEAN('d', "debug", &debug, "enable debug output"),
	OPT_END(),
};

static DEFINE_HASHTABLE(exports, 15);

static inline u32 str_hash(const char *str)
{
	return jhash(str, strlen(str), 0);
}

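/*
 * Return a malloc'd copy of 'orig' with SOH, newline and tab characters
 * escaped so the string can be printed safely in debug output.
 */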
static char *escape_str(const char *orig)
{
	size_t len = 0;
	const char *a;
	char *b, *new;

	for (a = orig; *a; a++) {
		switch (*a) {
		case '\001': len += 5; break;
		case '\n':
		case '\t': len += 2; break;
		default: len++;
		}
	}

	new = malloc(len + 1);
	if (!new)
		return NULL;

	for (a = orig, b = new; *a; a++) {
		switch (*a) {
		case '\001': memcpy(b, "<SOH>", 5); b += 5; break;
		case '\n': *b++ = '\\'; *b++ = 'n'; break;
		case '\t': *b++ = '\\'; *b++ = 't'; break;
		default: *b++ = *a;
		}
	}

	*b = '\0';
	return new;
}

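/*
 * Read Module.symvers (from the current directory or the top of the kernel
 * tree) and build a hash of all exported symbols and their owning modules.
 */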
static int read_exports(void)
{
	const char *symvers = "Module.symvers";
	char line[1024], *path = NULL;
	unsigned int line_num = 1;
	FILE *file;

	file = fopen(symvers, "r");
	if (!file) {
		path = top_level_dir(symvers);
		if (!path) {
			ERROR("can't open '%s', \"objtool diff\" should be run from the kernel tree", symvers);
			return -1;
		}

		file = fopen(path, "r");
		if (!file) {
			ERROR_GLIBC("fopen");
			return -1;
		}
	}

	while (fgets(line, 1024, file)) {
		char *sym, *mod, *type;
		struct export *export;

		sym = strchr(line, '\t');
		if (!sym) {
			ERROR("malformed Module.symvers (sym) at line %d", line_num);
			return -1;
		}

		*sym++ = '\0';

		mod = strchr(sym, '\t');
		if (!mod) {
			ERROR("malformed Module.symvers (mod) at line %d", line_num);
			return -1;
		}

		*mod++ = '\0';

		type = strchr(mod, '\t');
		if (!type) {
			ERROR("malformed Module.symvers (type) at line %d", line_num);
			return -1;
		}

		*type++ = '\0';

		if (*sym == '\0' || *mod == '\0') {
			ERROR("malformed Module.symvers at line %d", line_num);
			return -1;
		}

		export = calloc(1, sizeof(*export));
		if (!export) {
			ERROR_GLIBC("calloc");
			return -1;
		}

		export->mod = strdup(mod);
		if (!export->mod) {
			ERROR_GLIBC("strdup");
			return -1;
		}

		export->sym = strdup(sym);
		if (!export->sym) {
			ERROR_GLIBC("strdup");
			return -1;
		}

		hash_add(exports, &export->hash, str_hash(sym));
	}

	free(path);
	fclose(file);

	return 0;
}

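/*
 * Read the per-symbol checksums emitted by "objtool --checksum" from the
 * .discard.sym_checksum section and attach them to the function symbols.
 */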
static int read_sym_checksums(struct elf *elf)
{
	struct section *sec;

	sec = find_section_by_name(elf, ".discard.sym_checksum");
	if (!sec) {
		ERROR("'%s' missing .discard.sym_checksum section, file not processed by 'objtool --checksum'?",
		      elf->name);
		return -1;
	}

	if (!sec->rsec) {
		ERROR("missing reloc section for .discard.sym_checksum");
		return -1;
	}

	if (sec_size(sec) % sizeof(struct sym_checksum)) {
		ERROR("struct sym_checksum size mismatch");
		return -1;
	}

	for (int i = 0; i < sec_size(sec) / sizeof(struct sym_checksum); i++) {
		struct sym_checksum *sym_checksum;
		struct reloc *reloc;
		struct symbol *sym;

		sym_checksum = (struct sym_checksum *)sec->data->d_buf + i;

		reloc = find_reloc_by_dest(elf, sec, i * sizeof(*sym_checksum));
		if (!reloc) {
			ERROR("can't find reloc for sym_checksum[%d]", i);
			return -1;
		}

		sym = reloc->sym;

		if (is_sec_sym(sym)) {
			ERROR("not sure how to handle section %s", sym->name);
			return -1;
		}

		if (is_func_sym(sym))
			sym->csum.checksum = sym_checksum->checksum;
	}

	return 0;
}

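/*
 * STT_FILE symbols delimit the per-source-file groups of local symbols used
 * below to correlate static symbols between the original and patched objects.
 */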
static struct symbol *first_file_symbol(struct elf *elf)
{
	struct symbol *sym;

	for_each_sym(elf, sym) {
		if (is_file_sym(sym))
			return sym;
	}

	return NULL;
}

static struct symbol *next_file_symbol(struct elf *elf, struct symbol *sym)
{
	for_each_sym_continue(elf, sym) {
		if (is_file_sym(sym))
			return sym;
	}

	return NULL;
}

/*
 * Certain static local variables should never be correlated. They will be
 * used in place rather than referencing the originals.
 */
static bool is_uncorrelated_static_local(struct symbol *sym)
{
	static const char * const vars[] = {
		"__already_done.",
		"__func__.",
		"__key.",
		"__warned.",
		"_entry.",
		"_entry_ptr.",
		"_rs.",
		"descriptor.",
		"CSWTCH.",
	};

	if (!is_object_sym(sym) || !is_local_sym(sym))
		return false;

	if (!strcmp(sym->sec->name, ".data.once"))
		return true;

	for (int i = 0; i < ARRAY_SIZE(vars); i++) {
		if (strstarts(sym->name, vars[i]))
			return true;
	}

	return false;
}

/*
 * Clang emits several useless .Ltmp_* code labels.
 */
static bool is_clang_tmp_label(struct symbol *sym)
{
	return sym->type == STT_NOTYPE &&
	       is_text_sec(sym->sec) &&
	       strstarts(sym->name, ".Ltmp") &&
	       isdigit(sym->name[5]);
}

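/*
 * Special sections are array-of-struct sections (alternatives, jump tables,
 * exception tables, etc.) whose entries get extracted individually.
 */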
static bool is_special_section(struct section *sec)
{
	static const char * const specials[] = {
		".altinstructions",
		".smp_locks",
		"__bug_table",
		"__ex_table",
		"__jump_table",
		"__mcount_loc",

		/*
		 * Extract .static_call_sites here to inherit non-module
		 * preferential treatment. The later static call processing
		 * during klp module build will be skipped when it sees this
		 * section already exists.
		 */
		".static_call_sites",
	};

	static const char * const non_special_discards[] = {
		".discard.addressable",
		".discard.sym_checksum",
	};

	if (is_text_sec(sec))
		return false;

	for (int i = 0; i < ARRAY_SIZE(specials); i++) {
		if (!strcmp(sec->name, specials[i]))
			return true;
	}

	/* Most .discard data sections are special */
	for (int i = 0; i < ARRAY_SIZE(non_special_discards); i++) {
		if (!strcmp(sec->name, non_special_discards[i]))
			return false;
	}

	return strstarts(sec->name, ".discard.");
}

/*
 * These sections are referenced by special sections but aren't considered
 * special sections themselves.
 */
static bool is_special_section_aux(struct section *sec)
{
	static const char * const specials_aux[] = {
		".altinstr_replacement",
		".altinstr_aux",
	};

	for (int i = 0; i < ARRAY_SIZE(specials_aux); i++) {
		if (!strcmp(sec->name, specials_aux[i]))
			return true;
	}

	return false;
}

/*
 * These symbols should never be correlated, so their local patched versions
 * are used instead of linking to the originals.
 */
static bool dont_correlate(struct symbol *sym)
{
	return is_file_sym(sym) ||
	       is_null_sym(sym) ||
	       is_sec_sym(sym) ||
	       is_prefix_func(sym) ||
	       is_uncorrelated_static_local(sym) ||
	       is_clang_tmp_label(sym) ||
	       is_string_sec(sym->sec) ||
	       is_special_section(sym->sec) ||
	       is_special_section_aux(sym->sec) ||
	       strstarts(sym->name, "__initcall__");
}

/*
 * For each symbol in the original kernel, find its corresponding "twin" in the
 * patched kernel.
 */
static int correlate_symbols(struct elfs *e)
{
	struct symbol *file1_sym, *file2_sym;
	struct symbol *sym1, *sym2;

	file1_sym = first_file_symbol(e->orig);
	file2_sym = first_file_symbol(e->patched);

	/*
	 * Correlate any locals before the first FILE symbol. This has been
	 * seen when LTO inexplicably strips the initramfs_data.o FILE symbol
	 * due to the file only containing data and no code.
	 */
	for_each_sym(e->orig, sym1) {
		if (sym1 == file1_sym || !is_local_sym(sym1))
			break;

		if (dont_correlate(sym1))
			continue;

		for_each_sym(e->patched, sym2) {
			if (sym2 == file2_sym || !is_local_sym(sym2))
				break;

			if (sym2->twin || dont_correlate(sym2))
				continue;

			if (strcmp(sym1->demangled_name, sym2->demangled_name))
				continue;

			sym1->twin = sym2;
			sym2->twin = sym1;
			break;
		}
	}

	/* Correlate locals after the first FILE symbol */
	for (; ; file1_sym = next_file_symbol(e->orig, file1_sym),
		 file2_sym = next_file_symbol(e->patched, file2_sym)) {

		if (!file1_sym && file2_sym) {
			ERROR("FILE symbol mismatch: NULL != %s", file2_sym->name);
			return -1;
		}

		if (file1_sym && !file2_sym) {
			ERROR("FILE symbol mismatch: %s != NULL", file1_sym->name);
			return -1;
		}

		if (!file1_sym)
			break;

		if (strcmp(file1_sym->name, file2_sym->name)) {
			ERROR("FILE symbol mismatch: %s != %s", file1_sym->name, file2_sym->name);
			return -1;
		}

		file1_sym->twin = file2_sym;
		file2_sym->twin = file1_sym;

		sym1 = file1_sym;

		for_each_sym_continue(e->orig, sym1) {
			if (is_file_sym(sym1) || !is_local_sym(sym1))
				break;

			if (dont_correlate(sym1))
				continue;

			sym2 = file2_sym;
			for_each_sym_continue(e->patched, sym2) {
				if (is_file_sym(sym2) || !is_local_sym(sym2))
					break;

				if (sym2->twin || dont_correlate(sym2))
					continue;

				if (strcmp(sym1->demangled_name, sym2->demangled_name))
					continue;

				sym1->twin = sym2;
				sym2->twin = sym1;
				break;
			}
		}
	}

	/* Correlate globals */
	for_each_sym(e->orig, sym1) {
		if (sym1->bind == STB_LOCAL)
			continue;

		sym2 = find_global_symbol_by_name(e->patched, sym1->name);

		if (sym2 && !sym2->twin && !strcmp(sym1->name, sym2->name)) {
			sym1->twin = sym2;
			sym2->twin = sym1;
		}
	}

	for_each_sym(e->orig, sym1) {
		if (sym1->twin || dont_correlate(sym1))
			continue;
		WARN("no correlation: %s", sym1->name);
	}

	return 0;
}

/* "sympos" is used by livepatch to disambiguate duplicate symbol names */
static unsigned long find_sympos(struct elf *elf, struct symbol *sym)
{
	bool vmlinux = str_ends_with(objname, "vmlinux.o");
	unsigned long sympos = 0, nr_matches = 0;
	bool has_dup = false;
	struct symbol *s;

	if (sym->bind != STB_LOCAL)
		return 0;

	if (vmlinux && sym->type == STT_FUNC) {
		/*
		 * HACK: Unfortunately, symbol ordering can differ between
		 * vmlinux.o and vmlinux due to the linker script emitting
		 * .text.unlikely* before .text*. Count .text.unlikely* first.
		 *
		 * TODO: Disambiguate symbols more reliably (checksums?)
		 */
		for_each_sym(elf, s) {
			if (strstarts(s->sec->name, ".text.unlikely") &&
			    !strcmp(s->name, sym->name)) {
				nr_matches++;
				if (s == sym)
					sympos = nr_matches;
				else
					has_dup = true;
			}
		}
		for_each_sym(elf, s) {
			if (!strstarts(s->sec->name, ".text.unlikely") &&
			    !strcmp(s->name, sym->name)) {
				nr_matches++;
				if (s == sym)
					sympos = nr_matches;
				else
					has_dup = true;
			}
		}
	} else {
		for_each_sym(elf, s) {
			if (!strcmp(s->name, sym->name)) {
				nr_matches++;
				if (s == sym)
					sympos = nr_matches;
				else
					has_dup = true;
			}
		}
	}

	if (!sympos) {
		ERROR("can't find sympos for %s", sym->name);
		return ULONG_MAX;
	}

	return has_dup ? sympos : 0;
}

static int clone_sym_relocs(struct elfs *e, struct symbol *patched_sym);

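/*
 * Create a copy of 'patched_sym' in the output object. With 'data_too', also
 * copy the symbol's data into the corresponding output section, creating that
 * section first if necessary.
 */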
static struct symbol *__clone_symbol(struct elf *elf, struct symbol *patched_sym,
				     bool data_too)
{
	struct section *out_sec = NULL;
	unsigned long offset = 0;
	struct symbol *out_sym;

	if (data_too && !is_undef_sym(patched_sym)) {
		struct section *patched_sec = patched_sym->sec;

		out_sec = find_section_by_name(elf, patched_sec->name);
		if (!out_sec) {
			out_sec = elf_create_section(elf, patched_sec->name, 0,
						     patched_sec->sh.sh_entsize,
						     patched_sec->sh.sh_type,
						     patched_sec->sh.sh_addralign,
						     patched_sec->sh.sh_flags);
			if (!out_sec)
				return NULL;
		}

		if (is_string_sec(patched_sym->sec)) {
			out_sym = elf_create_section_symbol(elf, out_sec);
			if (!out_sym)
				return NULL;

			goto sym_created;
		}

		if (!is_sec_sym(patched_sym))
			offset = sec_size(out_sec);

		if (patched_sym->len || is_sec_sym(patched_sym)) {
			void *data = NULL;
			size_t size;

			/* bss doesn't have data */
			if (patched_sym->sec->data->d_buf)
				data = patched_sym->sec->data->d_buf + patched_sym->offset;

			if (is_sec_sym(patched_sym))
				size = sec_size(patched_sym->sec);
			else
				size = patched_sym->len;

			if (!elf_add_data(elf, out_sec, data, size))
				return NULL;
		}
	}

	out_sym = elf_create_symbol(elf, patched_sym->name, out_sec,
				    patched_sym->bind, patched_sym->type,
				    offset, patched_sym->len);
	if (!out_sym)
		return NULL;

sym_created:
	patched_sym->clone = out_sym;
	out_sym->clone = patched_sym;

	return out_sym;
}

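/* Human-readable symbol type/bind names for debug output */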
static const char *sym_type(struct symbol *sym)
{
	switch (sym->type) {
	case STT_NOTYPE: return "NOTYPE";
	case STT_OBJECT: return "OBJECT";
	case STT_FUNC: return "FUNC";
	case STT_SECTION: return "SECTION";
	case STT_FILE: return "FILE";
	default: return "UNKNOWN";
	}
}

static const char *sym_bind(struct symbol *sym)
{
	switch (sym->bind) {
	case STB_LOCAL: return "LOCAL";
	case STB_GLOBAL: return "GLOBAL";
	case STB_WEAK: return "WEAK";
	default: return "UNKNOWN";
	}
}

/*
 * Copy a symbol to the output object, optionally including its data and
 * relocations.
 */
static struct symbol *clone_symbol(struct elfs *e, struct symbol *patched_sym,
				   bool data_too)
{
	struct symbol *pfx;

	if (patched_sym->clone)
		return patched_sym->clone;

	dbg_indent("%s%s", patched_sym->name, data_too ? " [+DATA]" : "");

	/* Make sure the prefix gets cloned first */
	if (is_func_sym(patched_sym) && data_too) {
		pfx = get_func_prefix(patched_sym);
		if (pfx)
			clone_symbol(e, pfx, true);
	}

	if (!__clone_symbol(e->out, patched_sym, data_too))
		return NULL;

	if (data_too && clone_sym_relocs(e, patched_sym))
		return NULL;

	return patched_sym->clone;
}

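/*
 * Mark a function for inclusion in the output object, along with its prefix
 * function and any .cold/parent counterpart.
 */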
static void mark_included_function(struct symbol *func)
{
	struct symbol *pfx;

	func->included = 1;

	/* Include prefix function */
	pfx = get_func_prefix(func);
	if (pfx)
		pfx->included = 1;

	/* Make sure .cold parent+child always stay together */
	if (func->cfunc && func->cfunc != func)
		func->cfunc->included = 1;
	if (func->pfunc && func->pfunc != func)
		func->pfunc->included = 1;
}

/*
 * Copy all changed functions (and their dependencies) from the patched object
 * to the output object.
 */
static int mark_changed_functions(struct elfs *e)
{
	struct symbol *sym_orig, *patched_sym;
	bool changed = false;

	/* Find changed functions */
	for_each_sym(e->orig, sym_orig) {
		if (!is_func_sym(sym_orig) || is_prefix_func(sym_orig))
			continue;

		patched_sym = sym_orig->twin;
		if (!patched_sym)
			continue;

		if (sym_orig->csum.checksum != patched_sym->csum.checksum) {
			patched_sym->changed = 1;
			mark_included_function(patched_sym);
			changed = true;
		}
	}

	/* Find added functions and print them */
	for_each_sym(e->patched, patched_sym) {
		if (!is_func_sym(patched_sym) || is_prefix_func(patched_sym))
			continue;

		if (!patched_sym->twin) {
			printf("%s: new function: %s\n", objname, patched_sym->name);
			mark_included_function(patched_sym);
			changed = true;
		}
	}

	/* Print changed functions */
	for_each_sym(e->patched, patched_sym) {
		if (patched_sym->changed)
			printf("%s: changed function: %s\n", objname, patched_sym->name);
	}

	return !changed ? -1 : 0;
}

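/* Clone all functions marked for inclusion, along with their data and relocs */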
static int clone_included_functions(struct elfs *e)
{
	struct symbol *patched_sym;

	for_each_sym(e->patched, patched_sym) {
		if (patched_sym->included) {
			if (!clone_symbol(e, patched_sym, true))
				return -1;
		}
	}

	return 0;
}

/*
 * Determine whether a relocation should reference the section rather than the
 * underlying symbol.
 */
static bool section_reference_needed(struct section *sec)
{
	/*
	 * String symbols are zero-length and uncorrelated. It's easier to
	 * deal with them as section symbols.
	 */
	if (is_string_sec(sec))
		return true;

	/*
	 * .rodata has mostly anonymous data so there's no way to determine the
	 * length of a needed reference. just copy the whole section if needed.
	 */
	if (strstarts(sec->name, ".rodata"))
		return true;

	/* UBSAN anonymous data */
	if (strstarts(sec->name, ".data..Lubsan") ||	/* GCC */
	    strstarts(sec->name, ".data..L__unnamed_"))	/* Clang */
		return true;

	return false;
}

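/*
 * A reloc is in its "proper" form when it references a section symbol exactly
 * in the cases where section_reference_needed() says it should.
 */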
static bool is_reloc_allowed(struct reloc *reloc)
{
	return section_reference_needed(reloc->sym->sec) == is_sec_sym(reloc->sym);
}

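/* Look up a symbol in the Module.symvers export hash */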
static struct export *find_export(struct symbol *sym)
{
	struct export *export;

	hash_for_each_possible(exports, export, hash, str_hash(sym->name)) {
		if (!strcmp(export->sym, sym->name))
			return export;
	}

	return NULL;
}

static const char *__find_modname(struct elfs *e)
{
	struct section *sec;
	char *name;

	sec = find_section_by_name(e->orig, ".modinfo");
	if (!sec) {
		ERROR("missing .modinfo section");
		return NULL;
	}

	name = memmem(sec->data->d_buf, sec_size(sec), "\0name=", 6);
	if (name)
		return name + 6;

	name = strdup(e->orig->name);
	if (!name) {
		ERROR_GLIBC("strdup");
		return NULL;
	}

	for (char *c = name; *c; c++) {
		if (*c == '/')
			name = c + 1;
		else if (*c == '-')
			*c = '_';
		else if (*c == '.') {
			*c = '\0';
			break;
		}
	}

	return name;
}

/* Get the object's module name as defined by the kernel (and klp_object) */
static const char *find_modname(struct elfs *e)
{
	const char *modname;

	if (e->modname)
		return e->modname;

	modname = __find_modname(e);
	e->modname = modname;
	return modname;
}

/*
 * Copying a function from its native compiled environment to a kernel module
 * removes its natural access to local functions/variables and unexported
 * globals. References to such symbols need to be converted to KLP relocs so
 * the kernel arch relocation code knows to apply them and where to find the
 * symbols. Particularly, duplicate static symbols need to be disambiguated.
 */
static bool klp_reloc_needed(struct reloc *patched_reloc)
{
	struct symbol *patched_sym = patched_reloc->sym;
	struct export *export;

	/* no external symbol to reference */
	if (dont_correlate(patched_sym))
		return false;

	/* For included functions, a regular reloc will do. */
	if (patched_sym->included)
		return false;

	/*
	 * If exported by a module, it has to be a klp reloc. Thanks to the
	 * clusterfunk that is late module patching, the patch module is
	 * allowed to be loaded before any modules it depends on.
	 *
	 * If exported by vmlinux, a normal reloc will do.
	 */
	export = find_export(patched_sym);
	if (export)
		return strcmp(export->mod, "vmlinux");

	if (!patched_sym->twin) {
		/*
		 * Presumably the symbol and its reference were added by the
		 * patch. The symbol could be defined in this .o or in another
		 * .o in the patch module.
		 *
		 * This check needs to be *after* the export check due to the
		 * possibility of the patch adding a new UNDEF reference to an
		 * exported symbol.
		 */
		return false;
	}

	/* Unexported symbol which lives in the original vmlinux or module. */
	return true;
}

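/*
 * Redirect a reloc to the target symbol's section symbol, folding the symbol
 * offset into the addend.
 */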
static int convert_reloc_sym_to_secsym(struct elf *elf, struct reloc *reloc)
{
	struct symbol *sym = reloc->sym;
	struct section *sec = sym->sec;

	if (!sec->sym && !elf_create_section_symbol(elf, sec))
		return -1;

	reloc->sym = sec->sym;
	set_reloc_sym(elf, reloc, sym->idx);
	set_reloc_addend(elf, reloc, sym->offset + reloc_addend(reloc));
	return 0;
}

static int convert_reloc_secsym_to_sym(struct elf *elf, struct reloc *reloc)
{
	struct symbol *sym = reloc->sym;
	struct section *sec = sym->sec;

	/* If the symbol has a dedicated section, it's easy to find */
	sym = find_symbol_by_offset(sec, 0);
	if (sym && sym->len == sec_size(sec))
		goto found_sym;

	/* No dedicated section; find the symbol manually */
	sym = find_symbol_containing(sec, arch_adjusted_addend(reloc));
	if (!sym) {
		/*
		 * This can happen for special section references to weak code
		 * whose symbol has been stripped by the linker.
		 */
		return -1;
	}

found_sym:
	reloc->sym = sym;
	set_reloc_sym(elf, reloc, sym->idx);
	set_reloc_addend(elf, reloc, reloc_addend(reloc) - sym->offset);
	return 0;
}

/*
 * Convert a relocation symbol reference to the needed format: either a section
 * symbol or the underlying symbol itself.
 */
static int convert_reloc_sym(struct elf *elf, struct reloc *reloc)
{
	if (is_reloc_allowed(reloc))
		return 0;

	if (section_reference_needed(reloc->sym->sec))
		return convert_reloc_sym_to_secsym(elf, reloc);
	else
		return convert_reloc_secsym_to_sym(elf, reloc);
}

/*
 * Convert a regular relocation to a klp relocation (sort of).
 */
static int clone_reloc_klp(struct elfs *e, struct reloc *patched_reloc,
			   struct section *sec, unsigned long offset,
			   struct export *export)
{
	struct symbol *patched_sym = patched_reloc->sym;
	s64 addend = reloc_addend(patched_reloc);
	const char *sym_modname, *sym_orig_name;
	static struct section *klp_relocs;
	struct symbol *sym, *klp_sym;
	unsigned long klp_reloc_off;
	char sym_name[SYM_NAME_LEN];
	struct klp_reloc klp_reloc;
	unsigned long sympos;

	if (!patched_sym->twin) {
		ERROR("unexpected klp reloc for new symbol %s", patched_sym->name);
		return -1;
	}

	/*
	 * Keep the original reloc intact for now to avoid breaking objtool run
	 * which relies on proper relocations for many of its features. This
	 * will be disabled later by "objtool klp post-link".
	 *
	 * Convert it to UNDEF (and WEAK to avoid modpost warnings).
	 */

	sym = patched_sym->clone;
	if (!sym) {
		/* STB_WEAK: avoid modpost undefined symbol warnings */
		sym = elf_create_symbol(e->out, patched_sym->name, NULL,
					STB_WEAK, patched_sym->type, 0, 0);
		if (!sym)
			return -1;

		patched_sym->clone = sym;
		sym->clone = patched_sym;
	}

	if (!elf_create_reloc(e->out, sec, offset, sym, addend, reloc_type(patched_reloc)))
		return -1;

	/*
	 * Create the KLP symbol.
	 */

	if (export) {
		sym_modname = export->mod;
		sym_orig_name = export->sym;
		sympos = 0;
	} else {
		sym_modname = find_modname(e);
		if (!sym_modname)
			return -1;

		sym_orig_name = patched_sym->twin->name;
		sympos = find_sympos(e->orig, patched_sym->twin);
		if (sympos == ULONG_MAX)
			return -1;
	}

	/* symbol format: .klp.sym.modname.sym_name,sympos */
	if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_SYM_PREFIX "%s.%s,%ld",
			   sym_modname, sym_orig_name, sympos))
		return -1;

	klp_sym = find_symbol_by_name(e->out, sym_name);
	if (!klp_sym) {
		__dbg_indent("%s", sym_name);

		/* STB_WEAK: avoid modpost undefined symbol warnings */
		klp_sym = elf_create_symbol(e->out, sym_name, NULL,
					    STB_WEAK, patched_sym->type, 0, 0);
		if (!klp_sym)
			return -1;
	}

	/*
	 * Create the __klp_relocs entry. This will be converted to an actual
	 * KLP rela by "objtool klp post-link".
	 *
	 * This intermediate step is necessary to prevent corruption by the
	 * linker, which doesn't know how to properly handle two rela sections
	 * applying to the same base section.
	 */

	if (!klp_relocs) {
		klp_relocs = elf_create_section(e->out, KLP_RELOCS_SEC, 0,
						0, SHT_PROGBITS, 8, SHF_ALLOC);
		if (!klp_relocs)
			return -1;
	}

	klp_reloc_off = sec_size(klp_relocs);
	memset(&klp_reloc, 0, sizeof(klp_reloc));

	klp_reloc.type = reloc_type(patched_reloc);
	if (!elf_add_data(e->out, klp_relocs, &klp_reloc, sizeof(klp_reloc)))
		return -1;

	/* klp_reloc.offset */
	if (!sec->sym && !elf_create_section_symbol(e->out, sec))
		return -1;

	if (!elf_create_reloc(e->out, klp_relocs,
			      klp_reloc_off + offsetof(struct klp_reloc, offset),
			      sec->sym, offset, R_ABS64))
		return -1;

	/* klp_reloc.sym */
	if (!elf_create_reloc(e->out, klp_relocs,
			      klp_reloc_off + offsetof(struct klp_reloc, sym),
			      klp_sym, addend, R_ABS64))
		return -1;

	return 0;
}

#define dbg_clone_reloc(sec, offset, patched_sym, addend, export, klp)		\
	dbg_indent("%s+0x%lx: %s%s0x%lx [%s%s%s%s%s%s]",			\
		   sec->name, offset, patched_sym->name,			\
		   addend >= 0 ? "+" : "-", labs(addend),			\
		   sym_type(patched_sym),					\
		   patched_sym->type == STT_SECTION ? "" : " ",			\
		   patched_sym->type == STT_SECTION ? "" : sym_bind(patched_sym), \
		   is_undef_sym(patched_sym) ? " UNDEF" : "",			\
		   export ? " EXPORTED" : "",					\
		   klp ? " KLP" : "")

/* Copy a reloc and its symbol to the output object */
static int clone_reloc(struct elfs *e, struct reloc *patched_reloc,
		       struct section *sec, unsigned long offset)
{
	struct symbol *patched_sym = patched_reloc->sym;
	struct export *export = find_export(patched_sym);
	long addend = reloc_addend(patched_reloc);
	struct symbol *out_sym;
	bool klp;

	if (!is_reloc_allowed(patched_reloc)) {
		ERROR_FUNC(patched_reloc->sec->base, reloc_offset(patched_reloc),
			   "missing symbol for reference to %s+%ld",
			   patched_sym->name, addend);
		return -1;
	}

	klp = klp_reloc_needed(patched_reloc);

	dbg_clone_reloc(sec, offset, patched_sym, addend, export, klp);

	if (klp) {
		if (clone_reloc_klp(e, patched_reloc, sec, offset, export))
			return -1;

		return 0;
	}

	/*
	 * Why !export sets 'data_too':
	 *
	 * Unexported non-klp symbols need to live in the patch module,
	 * otherwise there will be unresolved symbols. Notably, this includes:
	 *
	 *   - New functions/data
	 *   - String sections
	 *   - Special section entries
	 *   - Uncorrelated static local variables
	 *   - UBSAN sections
	 */
	out_sym = clone_symbol(e, patched_sym, patched_sym->included || !export);
	if (!out_sym)
		return -1;

	/*
	 * For strings, all references use section symbols, thanks to
	 * section_reference_needed(). clone_symbol() has cloned an empty
	 * version of the string section. Now copy the string itself.
	 */
	if (is_string_sec(patched_sym->sec)) {
		const char *str = patched_sym->sec->data->d_buf + addend;

		__dbg_indent("\"%s\"", escape_str(str));

		addend = elf_add_string(e->out, out_sym->sec, str);
		if (addend == -1)
			return -1;
	}

	if (!elf_create_reloc(e->out, sec, offset, out_sym, addend,
			      reloc_type(patched_reloc)))
		return -1;

	return 0;
}

/* Copy all relocs needed for a symbol's contents */
static int clone_sym_relocs(struct elfs *e, struct symbol *patched_sym)
{
	struct section *patched_rsec = patched_sym->sec->rsec;
	struct reloc *patched_reloc;
	unsigned long start, end;
	struct symbol *out_sym;

	out_sym = patched_sym->clone;
	if (!out_sym) {
		ERROR("no clone for %s", patched_sym->name);
		return -1;
	}

	if (!patched_rsec)
		return 0;

	if (!is_sec_sym(patched_sym) && !patched_sym->len)
		return 0;

	if (is_string_sec(patched_sym->sec))
		return 0;

	if (is_sec_sym(patched_sym)) {
		start = 0;
		end = sec_size(patched_sym->sec);
	} else {
		start = patched_sym->offset;
		end = start + patched_sym->len;
	}

	for_each_reloc(patched_rsec, patched_reloc) {
		unsigned long offset;

		if (reloc_offset(patched_reloc) < start ||
		    reloc_offset(patched_reloc) >= end)
			continue;

		/*
		 * Skip any reloc referencing .altinstr_aux. Its code is
		 * always patched by alternatives. See ALTERNATIVE_TERNARY().
		 */
		if (patched_reloc->sym->sec &&
		    !strcmp(patched_reloc->sym->sec->name, ".altinstr_aux"))
			continue;

		if (convert_reloc_sym(e->patched, patched_reloc)) {
			ERROR_FUNC(patched_rsec->base, reloc_offset(patched_reloc),
				   "failed to convert reloc sym '%s' to its proper format",
				   patched_reloc->sym->name);
			return -1;
		}

		offset = out_sym->offset + (reloc_offset(patched_reloc) - patched_sym->offset);

		if (clone_reloc(e, patched_reloc, out_sym->sec, offset))
			return -1;
	}
	return 0;

}

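/*
 * Create a uniquely named local symbol covering one entry (or slice) of a
 * section so that entry can be cloned on its own.
 */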
static int create_fake_symbol(struct elf *elf, struct section *sec,
			      unsigned long offset, size_t size)
{
	char name[SYM_NAME_LEN];
	unsigned int type;
	static int ctr;
	char *c;

	if (snprintf_check(name, SYM_NAME_LEN, "%s_%d", sec->name, ctr++))
		return -1;

	for (c = name; *c; c++)
		if (*c == '.')
			*c = '_';

	/*
	 * STT_NOTYPE: Prevent objtool from validating .altinstr_replacement
	 * while still allowing objdump to disassemble it.
	 */
	type = is_text_sec(sec) ? STT_NOTYPE : STT_OBJECT;
	return elf_create_symbol(elf, name, sec, STB_LOCAL, type, offset, size) ? 0 : -1;
}

/*
 * Special sections (alternatives, etc) are basically arrays of structs.
 * For all the special sections, create a symbol for each struct entry. This
 * is a bit cumbersome, but it makes the extracting of the individual entries
 * much more straightforward.
 *
 * There are three ways to identify the entry sizes for a special section:
 *
 * 1) ELF section header sh_entsize: Ideally this would be used almost
 *    everywhere. But unfortunately the toolchains make it difficult. The
 *    assembler .[push]section directive syntax only takes entsize when
 *    combined with SHF_MERGE. But Clang disallows combining SHF_MERGE with
 *    SHF_WRITE. And some special sections do need to be writable.
 *
 *    Another place this wouldn't work is .altinstr_replacement, whose entries
 *    don't have a fixed size.
 *
 * 2) ANNOTATE_DATA_SPECIAL: This is a lightweight objtool annotation which
 *    points to the beginning of each entry. The size of the entry is then
 *    inferred by the location of the subsequent annotation (or end of
 *    section).
 *
 * 3) Simple array of pointers: If the special section is just a basic array of
 *    pointers, the entry size can be inferred by the number of relocations.
 *    No annotations needed.
 *
 * Note I also tried to create per-entry symbols at the time of creation, in
 * the original [inline] asm. Unfortunately, creating uniquely named symbols
 * is trickier than one might think, especially with Clang inline asm. I
 * eventually just gave up trying to make that work, in favor of using
 * ANNOTATE_DATA_SPECIAL and creating the symbols here after the fact.
 */
static int create_fake_symbols(struct elf *elf)
{
	struct section *sec;
	struct reloc *reloc;

	/*
	 * 1) Make symbols for all the ANNOTATE_DATA_SPECIAL entries:
	 */

	sec = find_section_by_name(elf, ".discard.annotate_data");
	if (!sec || !sec->rsec)
		return 0;

	for_each_reloc(sec->rsec, reloc) {
		unsigned long offset, size;
		struct reloc *next_reloc;

		if (annotype(elf, sec, reloc) != ANNOTYPE_DATA_SPECIAL)
			continue;

		offset = reloc_addend(reloc);

		size = 0;
		next_reloc = reloc;
		for_each_reloc_continue(sec->rsec, next_reloc) {
			if (annotype(elf, sec, next_reloc) != ANNOTYPE_DATA_SPECIAL ||
			    next_reloc->sym->sec != reloc->sym->sec)
				continue;

			size = reloc_addend(next_reloc) - offset;
			break;
		}

		if (!size)
			size = sec_size(reloc->sym->sec) - offset;

		if (create_fake_symbol(elf, reloc->sym->sec, offset, size))
			return -1;
	}

	/*
	 * 2) Make symbols for sh_entsize, and simple arrays of pointers:
	 */

	for_each_sec(elf, sec) {
		unsigned int entry_size;
		unsigned long offset;

		if (!is_special_section(sec) || find_symbol_by_offset(sec, 0))
			continue;

		if (!sec->rsec) {
			ERROR("%s: missing special section relocations", sec->name);
			return -1;
		}

		entry_size = sec->sh.sh_entsize;
		if (!entry_size) {
			entry_size = arch_reloc_size(sec->rsec->relocs);
			if (sec_size(sec) != entry_size * sec_num_entries(sec->rsec)) {
				ERROR("%s: missing special section entsize or annotations", sec->name);
				return -1;
			}
		}

		for (offset = 0; offset < sec_size(sec); offset += entry_size) {
			if (create_fake_symbol(elf, sec, offset, entry_size))
				return -1;
		}
	}

	return 0;
}

/* Keep a special section entry if it references an included function */
static bool should_keep_special_sym(struct elf *elf, struct symbol *sym)
{
	struct reloc *reloc;

	if (is_sec_sym(sym) || !sym->sec->rsec)
		return false;

	sym_for_each_reloc(elf, sym, reloc) {
		if (convert_reloc_sym(elf, reloc))
			continue;

		if (is_func_sym(reloc->sym) && reloc->sym->included)
			return true;
	}

	return false;
}

/*
 * Klp relocations aren't allowed for __jump_table and .static_call_sites if
 * the referenced symbol lives in a kernel module, because such klp relocs may
 * be applied after static branch/call init, resulting in code corruption.
 *
 * Validate a special section entry to avoid that. Note that an inert
 * tracepoint is harmless enough, in that case just skip the entry and print a
 * warning. Otherwise, return an error.
 *
 * This is only a temporary limitation which will be fixed when livepatch adds
 * support for submodules: fully self-contained modules which are embedded in
 * the top-level livepatch module's data and which can be loaded on demand when
 * their corresponding to-be-patched module gets loaded. Then klp relocs can
 * be retired.
 *
 * Return:
 *   -1: error: validation failed
 *    1: warning: tracepoint skipped
 *    0: success
 */
static int validate_special_section_klp_reloc(struct elfs *e, struct symbol *sym)
{
	bool static_branch = !strcmp(sym->sec->name, "__jump_table");
	bool static_call = !strcmp(sym->sec->name, ".static_call_sites");
	struct symbol *code_sym = NULL;
	unsigned long code_offset = 0;
	struct reloc *reloc;
	int ret = 0;

	if (!static_branch && !static_call)
		return 0;

	sym_for_each_reloc(e->patched, sym, reloc) {
		const char *sym_modname;
		struct export *export;

		/* Static branch/call keys are always STT_OBJECT */
		if (reloc->sym->type != STT_OBJECT) {

			/* Save code location which can be printed below */
			if (reloc->sym->type == STT_FUNC && !code_sym) {
				code_sym = reloc->sym;
				code_offset = reloc_addend(reloc);
			}

			continue;
		}

		if (!klp_reloc_needed(reloc))
			continue;

		export = find_export(reloc->sym);
		if (export) {
			sym_modname = export->mod;
		} else {
			sym_modname = find_modname(e);
			if (!sym_modname)
				return -1;
		}

		/* vmlinux keys are ok */
		if (!strcmp(sym_modname, "vmlinux"))
			continue;

		if (static_branch) {
			if (strstarts(reloc->sym->name, "__tracepoint_")) {
				WARN("%s: disabling unsupported tracepoint %s",
				     code_sym->name, reloc->sym->name + 13);
				ret = 1;
				continue;
			}

			ERROR("%s+0x%lx: unsupported static branch key %s. Use static_key_enabled() instead",
			      code_sym->name, code_offset, reloc->sym->name);
			return -1;
		}

		/* static call */
		if (strstarts(reloc->sym->name, "__SCK__tp_func_")) {
			ret = 1;
			continue;
		}

		ERROR("%s()+0x%lx: unsupported static call key %s. Use KLP_STATIC_CALL() instead",
		      code_sym->name, code_offset, reloc->sym->name);
		return -1;
	}

	return ret;
}

static int clone_special_section(struct elfs *e, struct section *patched_sec)
{
	struct symbol *patched_sym;

	/*
	 * Extract all special section symbols (and their dependencies) which
	 * reference included functions.
	 */
	sec_for_each_sym(patched_sec, patched_sym) {
		int ret;

		if (!is_object_sym(patched_sym))
			continue;

		if (!should_keep_special_sym(e->patched, patched_sym))
			continue;

		ret = validate_special_section_klp_reloc(e, patched_sym);
		if (ret < 0)
			return -1;
		if (ret > 0)
			continue;

		if (!clone_symbol(e, patched_sym, true))
			return -1;
	}

	return 0;
}

/* Extract only the needed bits from special sections */
static int clone_special_sections(struct elfs *e)
{
	struct section *patched_sec;

	for_each_sec(e->patched, patched_sec) {
		if (is_special_section(patched_sec)) {
			if (clone_special_section(e, patched_sec))
				return -1;
		}
	}

	return 0;
}

/*
 * Create .init.klp_objects and .init.klp_funcs sections which are intermediate
 * sections provided as input to the patch module's init code for building the
 * klp_patch, klp_object and klp_func structs for the livepatch API.
 */
static int create_klp_sections(struct elfs *e)
{
	size_t obj_size = sizeof(struct klp_object_ext);
	size_t func_size = sizeof(struct klp_func_ext);
	struct section *obj_sec, *funcs_sec, *str_sec;
	struct symbol *funcs_sym, *str_sym, *sym;
	char sym_name[SYM_NAME_LEN];
	unsigned int nr_funcs = 0;
	const char *modname;
	void *obj_data;
	s64 addend;

	obj_sec = elf_create_section_pair(e->out, KLP_OBJECTS_SEC, obj_size, 0, 0);
	if (!obj_sec)
		return -1;

	funcs_sec = elf_create_section_pair(e->out, KLP_FUNCS_SEC, func_size, 0, 0);
	if (!funcs_sec)
		return -1;

	funcs_sym = elf_create_section_symbol(e->out, funcs_sec);
	if (!funcs_sym)
		return -1;

	str_sec = elf_create_section(e->out, KLP_STRINGS_SEC, 0, 0,
				     SHT_PROGBITS, 1,
				     SHF_ALLOC | SHF_STRINGS | SHF_MERGE);
	if (!str_sec)
		return -1;

	if (elf_add_string(e->out, str_sec, "") == -1)
		return -1;

	str_sym = elf_create_section_symbol(e->out, str_sec);
	if (!str_sym)
		return -1;

	/* allocate klp_object_ext */
	obj_data = elf_add_data(e->out, obj_sec, NULL, obj_size);
	if (!obj_data)
		return -1;

	modname = find_modname(e);
	if (!modname)
		return -1;

	/* klp_object_ext.name */
	if (strcmp(modname, "vmlinux")) {
		addend = elf_add_string(e->out, str_sec, modname);
		if (addend == -1)
			return -1;

		if (!elf_create_reloc(e->out, obj_sec,
				      offsetof(struct klp_object_ext, name),
				      str_sym, addend, R_ABS64))
			return -1;
	}

	/* klp_object_ext.funcs */
	if (!elf_create_reloc(e->out, obj_sec, offsetof(struct klp_object_ext, funcs),
			      funcs_sym, 0, R_ABS64))
		return -1;

	for_each_sym(e->out, sym) {
		unsigned long offset = nr_funcs * func_size;
		unsigned long sympos;
		void *func_data;

		if (!is_func_sym(sym) || sym->cold || !sym->clone || !sym->clone->changed)
			continue;

		/* allocate klp_func_ext */
		func_data = elf_add_data(e->out, funcs_sec, NULL, func_size);
		if (!func_data)
			return -1;

		/* klp_func_ext.old_name */
		addend = elf_add_string(e->out, str_sec, sym->clone->twin->name);
		if (addend == -1)
			return -1;

		if (!elf_create_reloc(e->out, funcs_sec,
				      offset + offsetof(struct klp_func_ext, old_name),
				      str_sym, addend, R_ABS64))
			return -1;

		/* klp_func_ext.new_func */
		if (!elf_create_reloc(e->out, funcs_sec,
				      offset + offsetof(struct klp_func_ext, new_func),
				      sym, 0, R_ABS64))
			return -1;

		/* klp_func_ext.sympos */
		BUILD_BUG_ON(sizeof(sympos) != sizeof_field(struct klp_func_ext, sympos));
		sympos = find_sympos(e->orig, sym->clone->twin);
		if (sympos == ULONG_MAX)
			return -1;
		memcpy(func_data + offsetof(struct klp_func_ext, sympos), &sympos,
		       sizeof_field(struct klp_func_ext, sympos));

		nr_funcs++;
	}

	/* klp_object_ext.nr_funcs */
	BUILD_BUG_ON(sizeof(nr_funcs) != sizeof_field(struct klp_object_ext, nr_funcs));
	memcpy(obj_data + offsetof(struct klp_object_ext, nr_funcs), &nr_funcs,
	       sizeof_field(struct klp_object_ext, nr_funcs));

	/*
	 * Find callback pointers created by KLP_PRE_PATCH_CALLBACK() and
	 * friends, and add them to the klp object.
	 */

	if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_PRE_PATCH_PREFIX "%s", modname))
		return -1;

	sym = find_symbol_by_name(e->out, sym_name);
	if (sym) {
		struct reloc *reloc;

		reloc = find_reloc_by_dest(e->out, sym->sec, sym->offset);

		if (!elf_create_reloc(e->out, obj_sec,
				      offsetof(struct klp_object_ext, callbacks) +
				      offsetof(struct klp_callbacks, pre_patch),
				      reloc->sym, reloc_addend(reloc), R_ABS64))
			return -1;
	}

	if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_POST_PATCH_PREFIX "%s", modname))
		return -1;

	sym = find_symbol_by_name(e->out, sym_name);
	if (sym) {
		struct reloc *reloc;

		reloc = find_reloc_by_dest(e->out, sym->sec, sym->offset);

		if (!elf_create_reloc(e->out, obj_sec,
				      offsetof(struct klp_object_ext, callbacks) +
				      offsetof(struct klp_callbacks, post_patch),
				      reloc->sym, reloc_addend(reloc), R_ABS64))
			return -1;
	}

	if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_PRE_UNPATCH_PREFIX "%s", modname))
		return -1;

	sym = find_symbol_by_name(e->out, sym_name);
	if (sym) {
		struct reloc *reloc;

		reloc = find_reloc_by_dest(e->out, sym->sec, sym->offset);

		if (!elf_create_reloc(e->out, obj_sec,
				      offsetof(struct klp_object_ext, callbacks) +
				      offsetof(struct klp_callbacks, pre_unpatch),
				      reloc->sym, reloc_addend(reloc), R_ABS64))
			return -1;
	}

	if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_POST_UNPATCH_PREFIX "%s", modname))
		return -1;

	sym = find_symbol_by_name(e->out, sym_name);
	if (sym) {
		struct reloc *reloc;

		reloc = find_reloc_by_dest(e->out, sym->sec, sym->offset);

		if (!elf_create_reloc(e->out, obj_sec,
				      offsetof(struct klp_object_ext, callbacks) +
				      offsetof(struct klp_callbacks, post_unpatch),
				      reloc->sym, reloc_addend(reloc), R_ABS64))
			return -1;
	}

	return 0;
}

/*
 * Copy all .modinfo import_ns= tags to ensure all namespaced exported symbols
 * can be accessed via normal relocs.
 */
static int copy_import_ns(struct elfs *e)
{
	struct section *patched_sec, *out_sec = NULL;
	char *import_ns, *data_end;

	patched_sec = find_section_by_name(e->patched, ".modinfo");
	if (!patched_sec)
		return 0;

	import_ns = patched_sec->data->d_buf;
	if (!import_ns)
		return 0;

	for (data_end = import_ns + sec_size(patched_sec);
	     import_ns < data_end;
	     import_ns += strlen(import_ns) + 1) {

		import_ns = memmem(import_ns, data_end - import_ns, "import_ns=", 10);
		if (!import_ns)
			return 0;

		if (!out_sec) {
			out_sec = find_section_by_name(e->out, ".modinfo");
			if (!out_sec) {
				out_sec = elf_create_section(e->out, ".modinfo", 0,
							     patched_sec->sh.sh_entsize,
							     patched_sec->sh.sh_type,
							     patched_sec->sh.sh_addralign,
							     patched_sec->sh.sh_flags);
				if (!out_sec)
					return -1;
			}
		}

		if (!elf_add_data(e->out, out_sec, import_ns, strlen(import_ns) + 1))
			return -1;
	}

	return 0;
}

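/*
 * Entry point for "objtool klp diff": compare the original and patched
 * objects and write the changed/new functions, plus everything they need, to
 * the output object.
 */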
int cmd_klp_diff(int argc, const char **argv)
{
	struct elfs e = {0};

	argc = parse_options(argc, argv, klp_diff_options, klp_diff_usage, 0);
	if (argc != 3)
		usage_with_options(klp_diff_usage, klp_diff_options);

	objname = argv[0];

	e.orig = elf_open_read(argv[0], O_RDONLY);
	e.patched = elf_open_read(argv[1], O_RDONLY);
	e.out = NULL;

	if (!e.orig || !e.patched)
		return -1;

	if (read_exports())
		return -1;

	if (read_sym_checksums(e.orig))
		return -1;

	if (read_sym_checksums(e.patched))
		return -1;

	if (correlate_symbols(&e))
		return -1;

	if (mark_changed_functions(&e))
		return 0;

	e.out = elf_create_file(&e.orig->ehdr, argv[2]);
	if (!e.out)
		return -1;

	/*
	 * Special section fake symbols are needed so that individual special
	 * section entries can be extracted by clone_special_sections().
	 *
	 * Note the fake symbols are also needed by clone_included_functions()
	 * because __WARN_printf() call sites add references to bug table
	 * entries in the calling functions.
	 */
	if (create_fake_symbols(e.patched))
		return -1;

	if (clone_included_functions(&e))
		return -1;

	if (clone_special_sections(&e))
		return -1;

	if (create_klp_sections(&e))
		return -1;

	if (copy_import_ns(&e))
		return -1;

	if (elf_write(e.out))
		return -1;

	return elf_close(e.out);
}