Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/arch/s390/kernel/module.c
49092 views
1
// SPDX-License-Identifier: GPL-2.0+
2
/*
3
* Kernel module help for s390.
4
*
5
* S390 version
6
* Copyright IBM Corp. 2002, 2003
7
* Author(s): Arnd Bergmann ([email protected])
8
* Martin Schwidefsky ([email protected])
9
*
10
* based on i386 version
11
* Copyright (C) 2001 Rusty Russell.
12
*/
13
#include <linux/module.h>
14
#include <linux/elf.h>
15
#include <linux/vmalloc.h>
16
#include <linux/fs.h>
17
#include <linux/ftrace.h>
18
#include <linux/string.h>
19
#include <linux/kernel.h>
20
#include <linux/kasan.h>
21
#include <linux/moduleloader.h>
22
#include <linux/bug.h>
23
#include <linux/memory.h>
24
#include <linux/execmem.h>
25
#include <asm/arch-stackprotector.h>
26
#include <asm/alternative.h>
27
#include <asm/nospec-branch.h>
28
#include <asm/facility.h>
29
#include <asm/ftrace.lds.h>
30
#include <asm/set_memory.h>
31
#include <asm/setup.h>
32
#include <asm/asm-offsets.h>
33
34
/* Flip "#if 0" to "#if 1" to get verbose relocation debugging via printk. */
#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt , ...)
#endif

/* Size in bytes of one generated PLT stub (layout built in apply_rela()). */
#define PLT_ENTRY_SIZE 22
41
42
#ifdef CONFIG_FUNCTION_TRACER
43
void module_arch_cleanup(struct module *mod)
44
{
45
execmem_free(mod->arch.trampolines_start);
46
}
47
#endif
48
49
void module_arch_freeing_init(struct module *mod)
50
{
51
if (is_livepatch_module(mod) &&
52
mod->state == MODULE_STATE_LIVE)
53
return;
54
55
vfree(mod->arch.syminfo);
56
mod->arch.syminfo = NULL;
57
}
58
59
/*
 * First pass over one RELA entry: grow the module's GOT/PLT space
 * requirements.  A GOT or PLT slot is reserved only the first time a
 * given symbol needs one; the slot offset is cached in the per-symbol
 * mod_arch_syminfo entry (-1UL means "no slot assigned yet").
 */
static void check_rela(Elf_Rela *rela, struct module *me)
{
	struct mod_arch_syminfo *info;

	info = me->arch.syminfo + ELF_R_SYM (rela->r_info);
	switch (ELF_R_TYPE (rela->r_info)) {
	case R_390_GOT12:	/* 12 bit GOT offset.  */
	case R_390_GOT16:	/* 16 bit GOT offset.  */
	case R_390_GOT20:	/* 20 bit GOT offset.  */
	case R_390_GOT32:	/* 32 bit GOT offset.  */
	case R_390_GOT64:	/* 64 bit GOT offset.  */
	case R_390_GOTENT:	/* 32 bit PC rel. to GOT entry shifted by 1. */
	case R_390_GOTPLT12:	/* 12 bit offset to jump slot.  */
	case R_390_GOTPLT16:	/* 16 bit offset to jump slot.  */
	case R_390_GOTPLT20:	/* 20 bit offset to jump slot.  */
	case R_390_GOTPLT32:	/* 32 bit offset to jump slot.  */
	case R_390_GOTPLT64:	/* 64 bit offset to jump slot.  */
	case R_390_GOTPLTENT:	/* 32 bit rel. offset to jump slot >> 1. */
		/* Reserve one pointer-sized GOT slot on first use. */
		if (info->got_offset == -1UL) {
			info->got_offset = me->arch.got_size;
			me->arch.got_size += sizeof(void*);
		}
		break;
	case R_390_PLT16DBL:	/* 16 bit PC rel. PLT shifted by 1.  */
	case R_390_PLT32DBL:	/* 32 bit PC rel. PLT shifted by 1.  */
	case R_390_PLT32:	/* 32 bit PC relative PLT address.  */
	case R_390_PLT64:	/* 64 bit PC relative PLT address.  */
	case R_390_PLTOFF16:	/* 16 bit offset from GOT to PLT. */
	case R_390_PLTOFF32:	/* 32 bit offset from GOT to PLT. */
	case R_390_PLTOFF64:	/* 64 bit offset from GOT to PLT. */
		/* Reserve one PLT stub on first use. */
		if (info->plt_offset == -1UL) {
			info->plt_offset = me->arch.plt_size;
			me->arch.plt_size += PLT_ENTRY_SIZE;
		}
		break;
	case R_390_COPY:
	case R_390_GLOB_DAT:
	case R_390_JMP_SLOT:
	case R_390_RELATIVE:
		/* Only needed if we want to support loading of
		   modules linked with -shared. */
		break;
	}
}
103
104
/*
 * Account for GOT and PLT relocations. We can't add sections for
 * got and plt but we can increase the core module size.
 */
int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
			      char *secstrings, struct module *me)
{
	Elf_Shdr *symtab;
	Elf_Sym *symbols;
	Elf_Rela *rela;
	char *strings;
	int nrela, i, j;
	struct module_memory *mod_mem;

	/* Find symbol table and string table. */
	symtab = NULL;
	for (i = 0; i < hdr->e_shnum; i++)
		switch (sechdrs[i].sh_type) {
		case SHT_SYMTAB:
			symtab = sechdrs + i;
			break;
		}
	if (!symtab) {
		printk(KERN_ERR "module %s: no symbol table\n", me->name);
		return -ENOEXEC;
	}

	/* Allocate one syminfo structure per symbol. */
	me->arch.nsyms = symtab->sh_size / sizeof(Elf_Sym);
	me->arch.syminfo = vmalloc(array_size(sizeof(struct mod_arch_syminfo),
					      me->arch.nsyms));
	if (!me->arch.syminfo)
		return -ENOMEM;
	symbols = (void *) hdr + symtab->sh_offset;
	strings = (void *) hdr + sechdrs[symtab->sh_link].sh_offset;
	for (i = 0; i < me->arch.nsyms; i++) {
		if (symbols[i].st_shndx == SHN_UNDEF &&
		    strcmp(strings + symbols[i].st_name,
			   "_GLOBAL_OFFSET_TABLE_") == 0)
			/* "Define" it as absolute. */
			symbols[i].st_shndx = SHN_ABS;
		/* -1UL marks "no GOT/PLT slot assigned yet" for check_rela(). */
		me->arch.syminfo[i].got_offset = -1UL;
		me->arch.syminfo[i].plt_offset = -1UL;
		me->arch.syminfo[i].got_initialized = 0;
		me->arch.syminfo[i].plt_initialized = 0;
	}

	/* Search for got/plt relocations. */
	me->arch.got_size = me->arch.plt_size = 0;
	for (i = 0; i < hdr->e_shnum; i++) {
		if (sechdrs[i].sh_type != SHT_RELA)
			continue;
		nrela = sechdrs[i].sh_size / sizeof(Elf_Rela);
		rela = (void *) hdr + sechdrs[i].sh_offset;
		for (j = 0; j < nrela; j++)
			check_rela(rela + j, me);
	}

	/* Increase core size by size of got & plt and set start
	   offsets for got and plt. */
	mod_mem = &me->mem[MOD_TEXT];
	mod_mem->size = ALIGN(mod_mem->size, 4);
	me->arch.got_offset = mod_mem->size;
	mod_mem->size += me->arch.got_size;
	me->arch.plt_offset = mod_mem->size;
	if (me->arch.plt_size) {
		/* One extra trailing entry for the shared expoline thunk
		   that module_finalize() fills in. */
		if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable)
			me->arch.plt_size += PLT_ENTRY_SIZE;
		mod_mem->size += me->arch.plt_size;
	}
	return 0;
}
176
177
static int apply_rela_bits(Elf_Addr loc, Elf_Addr val,
178
int sign, int bits, int shift,
179
void *(*write)(void *dest, const void *src, size_t len))
180
{
181
unsigned long umax;
182
long min, max;
183
void *dest = (void *)loc;
184
185
if (val & ((1UL << shift) - 1))
186
return -ENOEXEC;
187
if (sign) {
188
val = (Elf_Addr)(((long) val) >> shift);
189
min = -(1L << (bits - 1));
190
max = (1L << (bits - 1)) - 1;
191
if ((long) val < min || (long) val > max)
192
return -ENOEXEC;
193
} else {
194
val >>= shift;
195
umax = ((1UL << (bits - 1)) << 1) - 1;
196
if ((unsigned long) val > umax)
197
return -ENOEXEC;
198
}
199
200
if (bits == 8) {
201
unsigned char tmp = val;
202
write(dest, &tmp, 1);
203
} else if (bits == 12) {
204
unsigned short tmp = (val & 0xfff) |
205
(*(unsigned short *) loc & 0xf000);
206
write(dest, &tmp, 2);
207
} else if (bits == 16) {
208
unsigned short tmp = val;
209
write(dest, &tmp, 2);
210
} else if (bits == 20) {
211
unsigned int tmp = (val & 0xfff) << 16 |
212
(val & 0xff000) >> 4 | (*(unsigned int *) loc & 0xf00000ff);
213
write(dest, &tmp, 4);
214
} else if (bits == 32) {
215
unsigned int tmp = val;
216
write(dest, &tmp, 4);
217
} else if (bits == 64) {
218
unsigned long tmp = val;
219
write(dest, &tmp, 8);
220
}
221
return 0;
222
}
223
224
/*
 * Apply one RELA entry at base + r_offset.  All undefined symbols have
 * already been resolved by the generic loader, so symtab holds final
 * values.  GOT slots and PLT stubs reserved by check_rela() are filled
 * lazily the first time a relocation references them.  Every store to
 * module memory goes through the write() callback.  Returns 0, or
 * -ENOEXEC for unsupported types and out-of-range values.
 */
static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
		      const char *strtab, struct module *me,
		      void *(*write)(void *dest, const void *src, size_t len))
{
	struct mod_arch_syminfo *info;
	Elf_Addr loc, val;
	int r_type, r_sym;
	int rc = -ENOEXEC;

	/* This is where to make the change */
	loc = base + rela->r_offset;
	/* This is the symbol it is referring to.  Note that all
	   undefined symbols have been resolved.  */
	r_sym = ELF_R_SYM(rela->r_info);
	r_type = ELF_R_TYPE(rela->r_info);
	info = me->arch.syminfo + r_sym;
	val = symtab[r_sym].st_value;

	switch (r_type) {
	case R_390_NONE:	/* No relocation.  */
		rc = 0;
		break;
	case R_390_8:		/* Direct 8 bit.  */
	case R_390_12:		/* Direct 12 bit.  */
	case R_390_16:		/* Direct 16 bit.  */
	case R_390_20:		/* Direct 20 bit.  */
	case R_390_32:		/* Direct 32 bit.  */
	case R_390_64:		/* Direct 64 bit.  */
		val += rela->r_addend;
		if (r_type == R_390_8)
			rc = apply_rela_bits(loc, val, 0, 8, 0, write);
		else if (r_type == R_390_12)
			rc = apply_rela_bits(loc, val, 0, 12, 0, write);
		else if (r_type == R_390_16)
			rc = apply_rela_bits(loc, val, 0, 16, 0, write);
		else if (r_type == R_390_20)
			rc = apply_rela_bits(loc, val, 1, 20, 0, write);
		else if (r_type == R_390_32)
			rc = apply_rela_bits(loc, val, 0, 32, 0, write);
		else if (r_type == R_390_64)
			rc = apply_rela_bits(loc, val, 0, 64, 0, write);
		break;
	case R_390_PC16:	/* PC relative 16 bit.  */
	case R_390_PC16DBL:	/* PC relative 16 bit shifted by 1.  */
	case R_390_PC32DBL:	/* PC relative 32 bit shifted by 1.  */
	case R_390_PC32:	/* PC relative 32 bit.  */
	case R_390_PC64:	/* PC relative 64 bit.  */
		val += rela->r_addend - loc;
		if (r_type == R_390_PC16)
			rc = apply_rela_bits(loc, val, 1, 16, 0, write);
		else if (r_type == R_390_PC16DBL)
			rc = apply_rela_bits(loc, val, 1, 16, 1, write);
		else if (r_type == R_390_PC32DBL)
			rc = apply_rela_bits(loc, val, 1, 32, 1, write);
		else if (r_type == R_390_PC32)
			rc = apply_rela_bits(loc, val, 1, 32, 0, write);
		else if (r_type == R_390_PC64)
			rc = apply_rela_bits(loc, val, 1, 64, 0, write);
		break;
	case R_390_GOT12:	/* 12 bit GOT offset.  */
	case R_390_GOT16:	/* 16 bit GOT offset.  */
	case R_390_GOT20:	/* 20 bit GOT offset.  */
	case R_390_GOT32:	/* 32 bit GOT offset.  */
	case R_390_GOT64:	/* 64 bit GOT offset.  */
	case R_390_GOTENT:	/* 32 bit PC rel. to GOT entry shifted by 1. */
	case R_390_GOTPLT12:	/* 12 bit offset to jump slot.  */
	case R_390_GOTPLT20:	/* 20 bit offset to jump slot.  */
	case R_390_GOTPLT16:	/* 16 bit offset to jump slot.  */
	case R_390_GOTPLT32:	/* 32 bit offset to jump slot.  */
	case R_390_GOTPLT64:	/* 64 bit offset to jump slot.  */
	case R_390_GOTPLTENT:	/* 32 bit rel. offset to jump slot >> 1. */
		if (info->got_initialized == 0) {
			/* First use: store the symbol value into the slot
			   that check_rela() reserved for it. */
			Elf_Addr *gotent = me->mem[MOD_TEXT].base +
					   me->arch.got_offset +
					   info->got_offset;

			write(gotent, &val, sizeof(*gotent));
			info->got_initialized = 1;
		}
		val = info->got_offset + rela->r_addend;
		if (r_type == R_390_GOT12 ||
		    r_type == R_390_GOTPLT12)
			rc = apply_rela_bits(loc, val, 0, 12, 0, write);
		else if (r_type == R_390_GOT16 ||
			 r_type == R_390_GOTPLT16)
			rc = apply_rela_bits(loc, val, 0, 16, 0, write);
		else if (r_type == R_390_GOT20 ||
			 r_type == R_390_GOTPLT20)
			rc = apply_rela_bits(loc, val, 1, 20, 0, write);
		else if (r_type == R_390_GOT32 ||
			 r_type == R_390_GOTPLT32)
			rc = apply_rela_bits(loc, val, 0, 32, 0, write);
		else if (r_type == R_390_GOT64 ||
			 r_type == R_390_GOTPLT64)
			rc = apply_rela_bits(loc, val, 0, 64, 0, write);
		else if (r_type == R_390_GOTENT ||
			 r_type == R_390_GOTPLTENT) {
			/* PC-relative forms encode the GOT entry address,
			   not its offset. */
			val += (Elf_Addr)me->mem[MOD_TEXT].base +
				me->arch.got_offset - loc;
			rc = apply_rela_bits(loc, val, 1, 32, 1, write);
		}
		break;
	case R_390_PLT16DBL:	/* 16 bit PC rel. PLT shifted by 1.  */
	case R_390_PLT32DBL:	/* 32 bit PC rel. PLT shifted by 1.  */
	case R_390_PLT32:	/* 32 bit PC relative PLT address.  */
	case R_390_PLT64:	/* 64 bit PC relative PLT address.  */
	case R_390_PLTOFF16:	/* 16 bit offset from GOT to PLT. */
	case R_390_PLTOFF32:	/* 32 bit offset from GOT to PLT. */
	case R_390_PLTOFF64:	/* 64 bit offset from GOT to PLT. */
		if (info->plt_initialized == 0) {
			/* Build the PLT_ENTRY_SIZE-byte stub on first use:
			   load the target from offset 14 and branch to it. */
			unsigned char insn[PLT_ENTRY_SIZE];
			char *plt_base;
			char *ip;

			plt_base = me->mem[MOD_TEXT].base + me->arch.plt_offset;
			ip = plt_base + info->plt_offset;
			*(int *)insn = 0x0d10e310;	/* basr 1,0  */
			*(int *)&insn[4] = 0x100c0004;	/* lg	1,12(1) */
			if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
				char *jump_r1;

				/* Branch via the shared expoline thunk in the
				   last PLT entry (set up in module_finalize()). */
				jump_r1 = plt_base + me->arch.plt_size -
					PLT_ENTRY_SIZE;
				/* brcl	0xf,__jump_r1 */
				*(short *)&insn[8] = 0xc0f4;
				*(int *)&insn[10] = (jump_r1 - (ip + 8)) / 2;
			} else {
				*(int *)&insn[8] = 0x07f10000;	/* br %r1 */
			}
			*(long *)&insn[14] = val;

			write(ip, insn, sizeof(insn));
			info->plt_initialized = 1;
		}
		if (r_type == R_390_PLTOFF16 ||
		    r_type == R_390_PLTOFF32 ||
		    r_type == R_390_PLTOFF64)
			val = me->arch.plt_offset - me->arch.got_offset +
				info->plt_offset + rela->r_addend;
		else {
			/* Use the symbol directly if it is in branch range,
			   otherwise go through the PLT stub. */
			if (!((r_type == R_390_PLT16DBL &&
			       val - loc + 0xffffUL < 0x1ffffeUL) ||
			      (r_type == R_390_PLT32DBL &&
			       val - loc + 0xffffffffULL < 0x1fffffffeULL)))
				val = (Elf_Addr) me->mem[MOD_TEXT].base +
					me->arch.plt_offset +
					info->plt_offset;
			val += rela->r_addend - loc;
		}
		if (r_type == R_390_PLT16DBL)
			rc = apply_rela_bits(loc, val, 1, 16, 1, write);
		else if (r_type == R_390_PLTOFF16)
			rc = apply_rela_bits(loc, val, 0, 16, 0, write);
		else if (r_type == R_390_PLT32DBL)
			rc = apply_rela_bits(loc, val, 1, 32, 1, write);
		else if (r_type == R_390_PLT32 ||
			 r_type == R_390_PLTOFF32)
			rc = apply_rela_bits(loc, val, 0, 32, 0, write);
		else if (r_type == R_390_PLT64 ||
			 r_type == R_390_PLTOFF64)
			rc = apply_rela_bits(loc, val, 0, 64, 0, write);
		break;
	case R_390_GOTOFF16:	/* 16 bit offset to GOT.  */
	case R_390_GOTOFF32:	/* 32 bit offset to GOT.  */
	case R_390_GOTOFF64:	/* 64 bit offset to GOT.  */
		val = val + rela->r_addend -
			((Elf_Addr) me->mem[MOD_TEXT].base + me->arch.got_offset);
		if (r_type == R_390_GOTOFF16)
			rc = apply_rela_bits(loc, val, 0, 16, 0, write);
		else if (r_type == R_390_GOTOFF32)
			rc = apply_rela_bits(loc, val, 0, 32, 0, write);
		else if (r_type == R_390_GOTOFF64)
			rc = apply_rela_bits(loc, val, 0, 64, 0, write);
		break;
	case R_390_GOTPC:	/* 32 bit PC relative offset to GOT. */
	case R_390_GOTPCDBL:	/* 32 bit PC rel. off. to GOT shifted by 1. */
		val = (Elf_Addr) me->mem[MOD_TEXT].base + me->arch.got_offset +
			rela->r_addend - loc;
		if (r_type == R_390_GOTPC)
			rc = apply_rela_bits(loc, val, 1, 32, 0, write);
		else if (r_type == R_390_GOTPCDBL)
			rc = apply_rela_bits(loc, val, 1, 32, 1, write);
		break;
	case R_390_COPY:
	case R_390_GLOB_DAT:	/* Create GOT entry.  */
	case R_390_JMP_SLOT:	/* Create PLT entry.  */
	case R_390_RELATIVE:	/* Adjust by program base.  */
		/* Only needed if we want to support loading of
		   modules linked with -shared. */
		return -ENOEXEC;
	default:
		printk(KERN_ERR "module %s: unknown relocation: %u\n",
		       me->name, r_type);
		return -ENOEXEC;
	}
	if (rc) {
		printk(KERN_ERR "module %s: relocation error for symbol %s "
		       "(r_type %i, value 0x%lx)\n",
		       me->name, strtab + symtab[r_sym].st_name,
		       r_type, (unsigned long) val);
		return rc;
	}
	return 0;
}
428
429
static int __apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
430
unsigned int symindex, unsigned int relsec,
431
struct module *me,
432
void *(*write)(void *dest, const void *src, size_t len))
433
{
434
Elf_Addr base;
435
Elf_Sym *symtab;
436
Elf_Rela *rela;
437
unsigned long i, n;
438
int rc;
439
440
DEBUGP("Applying relocate section %u to %u\n",
441
relsec, sechdrs[relsec].sh_info);
442
base = sechdrs[sechdrs[relsec].sh_info].sh_addr;
443
symtab = (Elf_Sym *) sechdrs[symindex].sh_addr;
444
rela = (Elf_Rela *) sechdrs[relsec].sh_addr;
445
n = sechdrs[relsec].sh_size / sizeof(Elf_Rela);
446
447
for (i = 0; i < n; i++, rela++) {
448
rc = apply_rela(rela, base, symtab, strtab, me, write);
449
if (rc)
450
return rc;
451
}
452
return 0;
453
}
454
455
/*
 * Generic module-loader entry point for RELA processing.  While the
 * module is still unformed its memory is plainly writable, so memcpy
 * is fine; afterwards all patching must go through s390_kernel_write().
 */
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
		       unsigned int symindex, unsigned int relsec,
		       struct module *me)
{
	void *(*write)(void *, const void *, size_t);

	write = (me->state == MODULE_STATE_UNFORMED) ? memcpy
						     : s390_kernel_write;
	return __apply_relocate_add(sechdrs, strtab, symindex, relsec, me,
				    write);
}
468
469
#ifdef CONFIG_FUNCTION_TRACER
/*
 * Allocate the executable area holding this module's ftrace hotpatch
 * trampolines, sized from the callsite section @s, and record its
 * bounds in the module's arch data.  Returns 0 or -ENOMEM.
 */
static int module_alloc_ftrace_hotpatch_trampolines(struct module *me,
						    const Elf_Shdr *s)
{
	size_t size = FTRACE_HOTPATCH_TRAMPOLINES_SIZE(s->sh_size);
	int numpages = DIV_ROUND_UP(size, PAGE_SIZE);
	char *start;

	start = execmem_alloc(EXECMEM_FTRACE, numpages * PAGE_SIZE);
	if (!start)
		return -ENOMEM;
	/* Trampoline pages are made read-only + executable up front. */
	set_memory_rox((unsigned long)start, numpages);

	me->arch.trampolines_start = (struct ftrace_hotpatch_trampoline *)start;
	me->arch.trampolines_end =
		(struct ftrace_hotpatch_trampoline *)(start + size);
	me->arch.next_trampoline = me->arch.trampolines_start;

	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */
492
493
/*
 * Post-relocation fixups: terminate the shared expoline PLT thunk,
 * apply alternative instructions, hand the .s390_indirect/.s390_return
 * sections to nospec_revert(), apply stack-protector patching, and
 * allocate the ftrace hotpatch trampoline area.
 */
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s;
	char *secstrings, *secname;
	void *aseg;
	int rc = 0;

	if (IS_ENABLED(CONFIG_EXPOLINE) &&
	    !nospec_disable && me->arch.plt_size) {
		unsigned int *ij;

		/* The extra trailing PLT entry reserved by
		   module_frob_arch_sections() becomes the shared thunk
		   that all PLT stubs branch through (see apply_rela()). */
		ij = me->mem[MOD_TEXT].base + me->arch.plt_offset +
			me->arch.plt_size - PLT_ENTRY_SIZE;
		ij[0] = 0xc6000000;	/* exrl	%r0,.+10	*/
		ij[1] = 0x0005a7f4;	/* j	.		*/
		ij[2] = 0x000007f1;	/* br	%r1		*/
	}

	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
		aseg = (void *) s->sh_addr;
		secname = secstrings + s->sh_name;

		if (!strcmp(".altinstructions", secname))
			/* patch .altinstructions */
			apply_alternatives(aseg, aseg + s->sh_size);

		if (IS_ENABLED(CONFIG_EXPOLINE) &&
		    (str_has_prefix(secname, ".s390_indirect")))
			nospec_revert(aseg, aseg + s->sh_size);

		if (IS_ENABLED(CONFIG_EXPOLINE) &&
		    (str_has_prefix(secname, ".s390_return")))
			nospec_revert(aseg, aseg + s->sh_size);

		if (IS_ENABLED(CONFIG_STACKPROTECTOR) &&
		    (str_has_prefix(secname, "__stack_protector_loc"))) {
			rc = stack_protector_apply(aseg, aseg + s->sh_size);
			if (rc)
				break;
		}

#ifdef CONFIG_FUNCTION_TRACER
		if (!strcmp(FTRACE_CALLSITE_SECTION, secname)) {
			rc = module_alloc_ftrace_hotpatch_trampolines(me, s);
			if (rc)
				break;
		}
#endif /* CONFIG_FUNCTION_TRACER */
	}

	return rc;
}
548
549