GitHub Repository: torvalds/linux
Path: blob/master/arch/s390/kernel/module.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Kernel module help for s390.
 *
 * S390 version
 *   Copyright IBM Corp. 2002, 2003
 *   Author(s): Arnd Bergmann ([email protected])
 *		Martin Schwidefsky ([email protected])
 *
 * based on i386 version
 *   Copyright (C) 2001 Rusty Russell.
 */
#include <linux/module.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/ftrace.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/moduleloader.h>
#include <linux/bug.h>
#include <linux/memory.h>
#include <linux/execmem.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
#include <asm/facility.h>
#include <asm/ftrace.lds.h>
#include <asm/set_memory.h>
#include <asm/setup.h>

#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt, ...)
#endif

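/*
 * Descriptive note (not in the original source): each PLT stub emitted by
 * apply_rela() below is 22 bytes: basr %r1,0 (2) + lg %r1,12(%r1) (6) +
 * a 6 byte branch slot (br %r1, or brcl to the shared expoline thunk) +
 * the 8 byte target address stored at offset 14.
 */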
#define PLT_ENTRY_SIZE 22

#ifdef CONFIG_FUNCTION_TRACER
void module_arch_cleanup(struct module *mod)
{
        execmem_free(mod->arch.trampolines_start);
}
#endif

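/*
 * Descriptive note (not in the original source): the per-symbol syminfo
 * array is normally freed once loading has finished. For live-patch
 * modules that are already live it is kept, presumably because
 * relocations may still have to be applied later (livepatching).
 */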
void module_arch_freeing_init(struct module *mod)
{
        if (is_livepatch_module(mod) &&
            mod->state == MODULE_STATE_LIVE)
                return;

        vfree(mod->arch.syminfo);
        mod->arch.syminfo = NULL;
}

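/*
 * Descriptive note (not in the original source): first pass over a single
 * RELA entry. It only records how much GOT/PLT space the referenced symbol
 * will need; the actual entries are created lazily in apply_rela().
 */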
static void check_rela(Elf_Rela *rela, struct module *me)
{
        struct mod_arch_syminfo *info;

        info = me->arch.syminfo + ELF_R_SYM(rela->r_info);
        switch (ELF_R_TYPE(rela->r_info)) {
        case R_390_GOT12: /* 12 bit GOT offset. */
        case R_390_GOT16: /* 16 bit GOT offset. */
        case R_390_GOT20: /* 20 bit GOT offset. */
        case R_390_GOT32: /* 32 bit GOT offset. */
        case R_390_GOT64: /* 64 bit GOT offset. */
        case R_390_GOTENT: /* 32 bit PC rel. to GOT entry shifted by 1. */
        case R_390_GOTPLT12: /* 12 bit offset to jump slot. */
        case R_390_GOTPLT16: /* 16 bit offset to jump slot. */
        case R_390_GOTPLT20: /* 20 bit offset to jump slot. */
        case R_390_GOTPLT32: /* 32 bit offset to jump slot. */
        case R_390_GOTPLT64: /* 64 bit offset to jump slot. */
        case R_390_GOTPLTENT: /* 32 bit rel. offset to jump slot >> 1. */
                if (info->got_offset == -1UL) {
                        info->got_offset = me->arch.got_size;
                        me->arch.got_size += sizeof(void *);
                }
                break;
        case R_390_PLT16DBL: /* 16 bit PC rel. PLT shifted by 1. */
        case R_390_PLT32DBL: /* 32 bit PC rel. PLT shifted by 1. */
        case R_390_PLT32: /* 32 bit PC relative PLT address. */
        case R_390_PLT64: /* 64 bit PC relative PLT address. */
        case R_390_PLTOFF16: /* 16 bit offset from GOT to PLT. */
        case R_390_PLTOFF32: /* 32 bit offset from GOT to PLT. */
        case R_390_PLTOFF64: /* 64 bit offset from GOT to PLT. */
                if (info->plt_offset == -1UL) {
                        info->plt_offset = me->arch.plt_size;
                        me->arch.plt_size += PLT_ENTRY_SIZE;
                }
                break;
        case R_390_COPY:
        case R_390_GLOB_DAT:
        case R_390_JMP_SLOT:
        case R_390_RELATIVE:
                /* Only needed if we want to support loading of
                   modules linked with -shared. */
                break;
        }
}

/*
 * Account for GOT and PLT relocations. We can't add sections for
 * got and plt but we can increase the core module size.
 */
int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
                              char *secstrings, struct module *me)
{
        Elf_Shdr *symtab;
        Elf_Sym *symbols;
        Elf_Rela *rela;
        char *strings;
        int nrela, i, j;
        struct module_memory *mod_mem;

        /* Find symbol table and string table. */
        symtab = NULL;
        for (i = 0; i < hdr->e_shnum; i++)
                switch (sechdrs[i].sh_type) {
                case SHT_SYMTAB:
                        symtab = sechdrs + i;
                        break;
                }
        if (!symtab) {
                printk(KERN_ERR "module %s: no symbol table\n", me->name);
                return -ENOEXEC;
        }

        /* Allocate one syminfo structure per symbol. */
        me->arch.nsyms = symtab->sh_size / sizeof(Elf_Sym);
        me->arch.syminfo = vmalloc(array_size(sizeof(struct mod_arch_syminfo),
                                              me->arch.nsyms));
        if (!me->arch.syminfo)
                return -ENOMEM;
        symbols = (void *) hdr + symtab->sh_offset;
        strings = (void *) hdr + sechdrs[symtab->sh_link].sh_offset;
        for (i = 0; i < me->arch.nsyms; i++) {
                if (symbols[i].st_shndx == SHN_UNDEF &&
                    strcmp(strings + symbols[i].st_name,
                           "_GLOBAL_OFFSET_TABLE_") == 0)
                        /* "Define" it as absolute. */
                        symbols[i].st_shndx = SHN_ABS;
                me->arch.syminfo[i].got_offset = -1UL;
                me->arch.syminfo[i].plt_offset = -1UL;
                me->arch.syminfo[i].got_initialized = 0;
                me->arch.syminfo[i].plt_initialized = 0;
        }

        /* Search for got/plt relocations. */
        me->arch.got_size = me->arch.plt_size = 0;
        for (i = 0; i < hdr->e_shnum; i++) {
                if (sechdrs[i].sh_type != SHT_RELA)
                        continue;
                nrela = sechdrs[i].sh_size / sizeof(Elf_Rela);
                rela = (void *) hdr + sechdrs[i].sh_offset;
                for (j = 0; j < nrela; j++)
                        check_rela(rela + j, me);
        }

        /* Increase core size by size of got & plt and set start
           offsets for got and plt. */
        mod_mem = &me->mem[MOD_TEXT];
        mod_mem->size = ALIGN(mod_mem->size, 4);
        me->arch.got_offset = mod_mem->size;
        mod_mem->size += me->arch.got_size;
        me->arch.plt_offset = mod_mem->size;
        if (me->arch.plt_size) {
                if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable)
                        me->arch.plt_size += PLT_ENTRY_SIZE;
                mod_mem->size += me->arch.plt_size;
        }
        return 0;
}

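/*
 * Descriptive note (not in the original source): range-check a relocation
 * value and store it at @loc. The low @shift bits must be zero and are
 * dropped, and the result must fit into @bits (signed if @sign). The 12
 * and 20 bit cases scatter the value into the instruction word while
 * preserving the surrounding opcode bits. All stores go through the
 * @write callback so the same code works whether the module text is
 * still writable or already mapped read-only.
 */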
static int apply_rela_bits(Elf_Addr loc, Elf_Addr val,
                           int sign, int bits, int shift,
                           void *(*write)(void *dest, const void *src, size_t len))
{
        unsigned long umax;
        long min, max;
        void *dest = (void *)loc;

        if (val & ((1UL << shift) - 1))
                return -ENOEXEC;
        if (sign) {
                val = (Elf_Addr)(((long) val) >> shift);
                min = -(1L << (bits - 1));
                max = (1L << (bits - 1)) - 1;
                if ((long) val < min || (long) val > max)
                        return -ENOEXEC;
        } else {
                val >>= shift;
                umax = ((1UL << (bits - 1)) << 1) - 1;
                if ((unsigned long) val > umax)
                        return -ENOEXEC;
        }

        if (bits == 8) {
                unsigned char tmp = val;
                write(dest, &tmp, 1);
        } else if (bits == 12) {
                unsigned short tmp = (val & 0xfff) |
                        (*(unsigned short *) loc & 0xf000);
                write(dest, &tmp, 2);
        } else if (bits == 16) {
                unsigned short tmp = val;
                write(dest, &tmp, 2);
        } else if (bits == 20) {
                unsigned int tmp = (val & 0xfff) << 16 |
                        (val & 0xff000) >> 4 | (*(unsigned int *) loc & 0xf00000ff);
                write(dest, &tmp, 4);
        } else if (bits == 32) {
                unsigned int tmp = val;
                write(dest, &tmp, 4);
        } else if (bits == 64) {
                unsigned long tmp = val;
                write(dest, &tmp, 8);
        }
        return 0;
}

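/*
 * Descriptive note (not in the original source): resolve a single RELA
 * entry. GOT and PLT entries are materialized on first use at the offsets
 * reserved by check_rela(), then the relocated value is written via
 * apply_rela_bits().
 */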
static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
                      const char *strtab, struct module *me,
                      void *(*write)(void *dest, const void *src, size_t len))
{
        struct mod_arch_syminfo *info;
        Elf_Addr loc, val;
        int r_type, r_sym;
        int rc = -ENOEXEC;

        /* This is where to make the change */
        loc = base + rela->r_offset;
        /* This is the symbol it is referring to.  Note that all
           undefined symbols have been resolved. */
        r_sym = ELF_R_SYM(rela->r_info);
        r_type = ELF_R_TYPE(rela->r_info);
        info = me->arch.syminfo + r_sym;
        val = symtab[r_sym].st_value;

        switch (r_type) {
        case R_390_NONE: /* No relocation. */
                rc = 0;
                break;
        case R_390_8: /* Direct 8 bit. */
        case R_390_12: /* Direct 12 bit. */
        case R_390_16: /* Direct 16 bit. */
        case R_390_20: /* Direct 20 bit. */
        case R_390_32: /* Direct 32 bit. */
        case R_390_64: /* Direct 64 bit. */
                val += rela->r_addend;
                if (r_type == R_390_8)
                        rc = apply_rela_bits(loc, val, 0, 8, 0, write);
                else if (r_type == R_390_12)
                        rc = apply_rela_bits(loc, val, 0, 12, 0, write);
                else if (r_type == R_390_16)
                        rc = apply_rela_bits(loc, val, 0, 16, 0, write);
                else if (r_type == R_390_20)
                        rc = apply_rela_bits(loc, val, 1, 20, 0, write);
                else if (r_type == R_390_32)
                        rc = apply_rela_bits(loc, val, 0, 32, 0, write);
                else if (r_type == R_390_64)
                        rc = apply_rela_bits(loc, val, 0, 64, 0, write);
                break;
        case R_390_PC16: /* PC relative 16 bit. */
        case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */
        case R_390_PC32DBL: /* PC relative 32 bit shifted by 1. */
        case R_390_PC32: /* PC relative 32 bit. */
        case R_390_PC64: /* PC relative 64 bit. */
                val += rela->r_addend - loc;
                if (r_type == R_390_PC16)
                        rc = apply_rela_bits(loc, val, 1, 16, 0, write);
                else if (r_type == R_390_PC16DBL)
                        rc = apply_rela_bits(loc, val, 1, 16, 1, write);
                else if (r_type == R_390_PC32DBL)
                        rc = apply_rela_bits(loc, val, 1, 32, 1, write);
                else if (r_type == R_390_PC32)
                        rc = apply_rela_bits(loc, val, 1, 32, 0, write);
                else if (r_type == R_390_PC64)
                        rc = apply_rela_bits(loc, val, 1, 64, 0, write);
                break;
        case R_390_GOT12: /* 12 bit GOT offset. */
        case R_390_GOT16: /* 16 bit GOT offset. */
        case R_390_GOT20: /* 20 bit GOT offset. */
        case R_390_GOT32: /* 32 bit GOT offset. */
        case R_390_GOT64: /* 64 bit GOT offset. */
        case R_390_GOTENT: /* 32 bit PC rel. to GOT entry shifted by 1. */
        case R_390_GOTPLT12: /* 12 bit offset to jump slot. */
        case R_390_GOTPLT20: /* 20 bit offset to jump slot. */
        case R_390_GOTPLT16: /* 16 bit offset to jump slot. */
        case R_390_GOTPLT32: /* 32 bit offset to jump slot. */
        case R_390_GOTPLT64: /* 64 bit offset to jump slot. */
        case R_390_GOTPLTENT: /* 32 bit rel. offset to jump slot >> 1. */
                if (info->got_initialized == 0) {
                        Elf_Addr *gotent = me->mem[MOD_TEXT].base +
                                           me->arch.got_offset +
                                           info->got_offset;

                        write(gotent, &val, sizeof(*gotent));
                        info->got_initialized = 1;
                }
                val = info->got_offset + rela->r_addend;
                if (r_type == R_390_GOT12 ||
                    r_type == R_390_GOTPLT12)
                        rc = apply_rela_bits(loc, val, 0, 12, 0, write);
                else if (r_type == R_390_GOT16 ||
                         r_type == R_390_GOTPLT16)
                        rc = apply_rela_bits(loc, val, 0, 16, 0, write);
                else if (r_type == R_390_GOT20 ||
                         r_type == R_390_GOTPLT20)
                        rc = apply_rela_bits(loc, val, 1, 20, 0, write);
                else if (r_type == R_390_GOT32 ||
                         r_type == R_390_GOTPLT32)
                        rc = apply_rela_bits(loc, val, 0, 32, 0, write);
                else if (r_type == R_390_GOT64 ||
                         r_type == R_390_GOTPLT64)
                        rc = apply_rela_bits(loc, val, 0, 64, 0, write);
                else if (r_type == R_390_GOTENT ||
                         r_type == R_390_GOTPLTENT) {
                        val += (Elf_Addr)me->mem[MOD_TEXT].base +
                               me->arch.got_offset - loc;
                        rc = apply_rela_bits(loc, val, 1, 32, 1, write);
                }
                break;
        case R_390_PLT16DBL: /* 16 bit PC rel. PLT shifted by 1. */
        case R_390_PLT32DBL: /* 32 bit PC rel. PLT shifted by 1. */
        case R_390_PLT32: /* 32 bit PC relative PLT address. */
        case R_390_PLT64: /* 64 bit PC relative PLT address. */
        case R_390_PLTOFF16: /* 16 bit offset from GOT to PLT. */
        case R_390_PLTOFF32: /* 32 bit offset from GOT to PLT. */
        case R_390_PLTOFF64: /* 64 bit offset from GOT to PLT. */
                if (info->plt_initialized == 0) {
                        unsigned char insn[PLT_ENTRY_SIZE];
                        char *plt_base;
                        char *ip;

                        plt_base = me->mem[MOD_TEXT].base + me->arch.plt_offset;
                        ip = plt_base + info->plt_offset;
                        *(int *)insn = 0x0d10e310;      /* basr 1,0 */
                        *(int *)&insn[4] = 0x100c0004;  /* lg 1,12(1) */
                        if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
                                char *jump_r1;

                                jump_r1 = plt_base + me->arch.plt_size -
                                        PLT_ENTRY_SIZE;
                                /* brcl 0xf,__jump_r1 */
                                *(short *)&insn[8] = 0xc0f4;
                                *(int *)&insn[10] = (jump_r1 - (ip + 8)) / 2;
                        } else {
                                *(int *)&insn[8] = 0x07f10000;  /* br %r1 */
                        }
                        *(long *)&insn[14] = val;

                        write(ip, insn, sizeof(insn));
                        info->plt_initialized = 1;
                }
                if (r_type == R_390_PLTOFF16 ||
                    r_type == R_390_PLTOFF32 ||
                    r_type == R_390_PLTOFF64)
                        val = me->arch.plt_offset - me->arch.got_offset +
                                info->plt_offset + rela->r_addend;
                else {
                        if (!((r_type == R_390_PLT16DBL &&
                               val - loc + 0xffffUL < 0x1ffffeUL) ||
                              (r_type == R_390_PLT32DBL &&
                               val - loc + 0xffffffffULL < 0x1fffffffeULL)))
                                val = (Elf_Addr) me->mem[MOD_TEXT].base +
                                        me->arch.plt_offset +
                                        info->plt_offset;
                        val += rela->r_addend - loc;
                }
                if (r_type == R_390_PLT16DBL)
                        rc = apply_rela_bits(loc, val, 1, 16, 1, write);
                else if (r_type == R_390_PLTOFF16)
                        rc = apply_rela_bits(loc, val, 0, 16, 0, write);
                else if (r_type == R_390_PLT32DBL)
                        rc = apply_rela_bits(loc, val, 1, 32, 1, write);
                else if (r_type == R_390_PLT32 ||
                         r_type == R_390_PLTOFF32)
                        rc = apply_rela_bits(loc, val, 0, 32, 0, write);
                else if (r_type == R_390_PLT64 ||
                         r_type == R_390_PLTOFF64)
                        rc = apply_rela_bits(loc, val, 0, 64, 0, write);
                break;
        case R_390_GOTOFF16: /* 16 bit offset to GOT. */
        case R_390_GOTOFF32: /* 32 bit offset to GOT. */
        case R_390_GOTOFF64: /* 64 bit offset to GOT. */
                val = val + rela->r_addend -
                        ((Elf_Addr) me->mem[MOD_TEXT].base + me->arch.got_offset);
                if (r_type == R_390_GOTOFF16)
                        rc = apply_rela_bits(loc, val, 0, 16, 0, write);
                else if (r_type == R_390_GOTOFF32)
                        rc = apply_rela_bits(loc, val, 0, 32, 0, write);
                else if (r_type == R_390_GOTOFF64)
                        rc = apply_rela_bits(loc, val, 0, 64, 0, write);
                break;
        case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
        case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
                val = (Elf_Addr) me->mem[MOD_TEXT].base + me->arch.got_offset +
                        rela->r_addend - loc;
                if (r_type == R_390_GOTPC)
                        rc = apply_rela_bits(loc, val, 1, 32, 0, write);
                else if (r_type == R_390_GOTPCDBL)
                        rc = apply_rela_bits(loc, val, 1, 32, 1, write);
                break;
        case R_390_COPY:
        case R_390_GLOB_DAT:    /* Create GOT entry. */
        case R_390_JMP_SLOT:    /* Create PLT entry. */
        case R_390_RELATIVE:    /* Adjust by program base. */
                /* Only needed if we want to support loading of
                   modules linked with -shared. */
                return -ENOEXEC;
        default:
                printk(KERN_ERR "module %s: unknown relocation: %u\n",
                       me->name, r_type);
                return -ENOEXEC;
        }
        if (rc) {
                printk(KERN_ERR "module %s: relocation error for symbol %s "
                       "(r_type %i, value 0x%lx)\n",
                       me->name, strtab + symtab[r_sym].st_name,
                       r_type, (unsigned long) val);
                return rc;
        }
        return 0;
}

static int __apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
                                unsigned int symindex, unsigned int relsec,
                                struct module *me,
                                void *(*write)(void *dest, const void *src, size_t len))
{
        Elf_Addr base;
        Elf_Sym *symtab;
        Elf_Rela *rela;
        unsigned long i, n;
        int rc;

        DEBUGP("Applying relocate section %u to %u\n",
               relsec, sechdrs[relsec].sh_info);
        base = sechdrs[sechdrs[relsec].sh_info].sh_addr;
        symtab = (Elf_Sym *) sechdrs[symindex].sh_addr;
        rela = (Elf_Rela *) sechdrs[relsec].sh_addr;
        n = sechdrs[relsec].sh_size / sizeof(Elf_Rela);

        for (i = 0; i < n; i++, rela++) {
                rc = apply_rela(rela, base, symtab, strtab, me, write);
                if (rc)
                        return rc;
        }
        return 0;
}

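/*
 * Descriptive note (not in the original source): while the module is still
 * MODULE_STATE_UNFORMED its text is ordinary writable memory and plain
 * memcpy() suffices; afterwards (e.g. livepatch relocations applied once
 * the module is live) the text is read-only and must be patched through
 * s390_kernel_write().
 */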
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
                       unsigned int symindex, unsigned int relsec,
                       struct module *me)
{
        bool early = me->state == MODULE_STATE_UNFORMED;
        void *(*write)(void *, const void *, size_t) = memcpy;

        if (!early)
                write = s390_kernel_write;

        return __apply_relocate_add(sechdrs, strtab, symindex, relsec, me,
                                    write);
}

#ifdef CONFIG_FUNCTION_TRACER
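/*
 * Descriptive note (not in the original source): allocate a read-only,
 * executable area for the module's ftrace hotpatch trampolines, sized by
 * FTRACE_HOTPATCH_TRAMPOLINES_SIZE() from the call-site section size
 * (presumably one trampoline per call site); arch.next_trampoline is the
 * cursor from which the ftrace code hands out entries.
 */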
static int module_alloc_ftrace_hotpatch_trampolines(struct module *me,
                                                    const Elf_Shdr *s)
{
        char *start, *end;
        int numpages;
        size_t size;

        size = FTRACE_HOTPATCH_TRAMPOLINES_SIZE(s->sh_size);
        numpages = DIV_ROUND_UP(size, PAGE_SIZE);
        start = execmem_alloc(EXECMEM_FTRACE, numpages * PAGE_SIZE);
        if (!start)
                return -ENOMEM;
        set_memory_rox((unsigned long)start, numpages);
        end = start + size;

        me->arch.trampolines_start = (struct ftrace_hotpatch_trampoline *)start;
        me->arch.trampolines_end = (struct ftrace_hotpatch_trampoline *)end;
        me->arch.next_trampoline = me->arch.trampolines_start;

        return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

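/*
 * Descriptive note (not in the original source): late fixups once all
 * sections are loaded: fill the shared expoline thunk in the extra PLT
 * slot reserved by module_frob_arch_sections(), apply alternative
 * instructions, run nospec_revert() on the .s390_indirect/.s390_return
 * branch lists, and allocate the ftrace hotpatch trampolines.
 */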
int module_finalize(const Elf_Ehdr *hdr,
                    const Elf_Shdr *sechdrs,
                    struct module *me)
{
        const Elf_Shdr *s;
        char *secstrings, *secname;
        void *aseg;
#ifdef CONFIG_FUNCTION_TRACER
        int ret;
#endif

        if (IS_ENABLED(CONFIG_EXPOLINE) &&
            !nospec_disable && me->arch.plt_size) {
                unsigned int *ij;

                ij = me->mem[MOD_TEXT].base + me->arch.plt_offset +
                        me->arch.plt_size - PLT_ENTRY_SIZE;
                ij[0] = 0xc6000000;     /* exrl %r0,.+10 */
                ij[1] = 0x0005a7f4;     /* j . */
                ij[2] = 0x000007f1;     /* br %r1 */
        }

        secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
        for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
                aseg = (void *) s->sh_addr;
                secname = secstrings + s->sh_name;

                if (!strcmp(".altinstructions", secname))
                        /* patch .altinstructions */
                        apply_alternatives(aseg, aseg + s->sh_size);

                if (IS_ENABLED(CONFIG_EXPOLINE) &&
                    (str_has_prefix(secname, ".s390_indirect")))
                        nospec_revert(aseg, aseg + s->sh_size);

                if (IS_ENABLED(CONFIG_EXPOLINE) &&
                    (str_has_prefix(secname, ".s390_return")))
                        nospec_revert(aseg, aseg + s->sh_size);

#ifdef CONFIG_FUNCTION_TRACER
                if (!strcmp(FTRACE_CALLSITE_SECTION, secname)) {
                        ret = module_alloc_ftrace_hotpatch_trampolines(me, s);
                        if (ret < 0)
                                return ret;
                }
#endif /* CONFIG_FUNCTION_TRACER */
        }

        return 0;
}