GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/arm/kernel/module.c
/*
 * linux/arch/arm/kernel/module.c
 *
 * Copyright (C) 2002 Russell King.
 * Modified for nommu by Hyok S. Choi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Module allocation method suggested by Andi Kleen.
 */
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/gfp.h>

#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/smp_plat.h>
#include <asm/unwind.h>

#ifdef CONFIG_XIP_KERNEL
/*
 * The XIP kernel text is mapped in the module area for modules and
 * some other stuff to work without any indirect relocations.
 * MODULES_VADDR is redefined here and not in asm/memory.h to avoid
 * recompiling the whole kernel when CONFIG_XIP_KERNEL is turned on/off.
 */
#undef MODULES_VADDR
#define MODULES_VADDR (((unsigned long)_etext + ~PGDIR_MASK) & PGDIR_MASK)
#endif

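/*
 * Module memory allocation.  With an MMU, modules live in the dedicated
 * MODULES_VADDR..MODULES_END window, which sits close enough to the kernel
 * image that 24-bit PC-relative branches (R_ARM_CALL/R_ARM_JUMP24, roughly
 * a +/-32MB reach) can be resolved directly.  Without an MMU there is no
 * separate module area, so a plain vmalloc() is used instead.
 */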
#ifdef CONFIG_MMU
void *module_alloc(unsigned long size)
{
        return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
                                GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
                                __builtin_return_address(0));
}
#else /* CONFIG_MMU */
void *module_alloc(unsigned long size)
{
        return size == 0 ? NULL : vmalloc(size);
}
#endif /* !CONFIG_MMU */

void module_free(struct module *module, void *region)
{
        vfree(region);
}

int module_frob_arch_sections(Elf_Ehdr *hdr,
                              Elf_Shdr *sechdrs,
                              char *secstrings,
                              struct module *mod)
{
        return 0;
}

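/*
 * Apply a REL relocation section.  Each entry patches one word (or Thumb
 * halfword pair) in the target section, sechdrs[relsec->sh_info], using the
 * final value of the referenced symbol.  Bad symbol indices, out-of-bounds
 * offsets and out-of-range branch displacements fail the load with -ENOEXEC.
 */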
int
apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
               unsigned int relindex, struct module *module)
{
        Elf32_Shdr *symsec = sechdrs + symindex;
        Elf32_Shdr *relsec = sechdrs + relindex;
        Elf32_Shdr *dstsec = sechdrs + relsec->sh_info;
        Elf32_Rel *rel = (void *)relsec->sh_addr;
        unsigned int i;

        for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rel); i++, rel++) {
                unsigned long loc;
                Elf32_Sym *sym;
                const char *symname;
                s32 offset;
#ifdef CONFIG_THUMB2_KERNEL
                u32 upper, lower, sign, j1, j2;
#endif

                offset = ELF32_R_SYM(rel->r_info);
                if (offset < 0 || offset > (symsec->sh_size / sizeof(Elf32_Sym))) {
                        pr_err("%s: section %u reloc %u: bad relocation sym offset\n",
                               module->name, relindex, i);
                        return -ENOEXEC;
                }

                sym = ((Elf32_Sym *)symsec->sh_addr) + offset;
                symname = strtab + sym->st_name;

                if (rel->r_offset < 0 || rel->r_offset > dstsec->sh_size - sizeof(u32)) {
                        pr_err("%s: section %u reloc %u sym '%s': out of bounds relocation, offset %d size %u\n",
                               module->name, relindex, i, symname,
                               rel->r_offset, dstsec->sh_size);
                        return -ENOEXEC;
                }

                loc = dstsec->sh_addr + rel->r_offset;

                switch (ELF32_R_TYPE(rel->r_info)) {
                case R_ARM_NONE:
                        /* ignore */
                        break;

                case R_ARM_ABS32:
                        *(u32 *)loc += sym->st_value;
                        break;

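                /*
                 * 24-bit PC-relative branches (ARM B/BL).  Decode the addend
                 * already present in the instruction (imm24 << 2,
                 * sign-extended), add S - P, verify the result is
                 * word-aligned and within the +/-32MB branch range, then
                 * write back the low 24 bits of (offset >> 2).
                 */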
                case R_ARM_PC24:
                case R_ARM_CALL:
                case R_ARM_JUMP24:
                        offset = (*(u32 *)loc & 0x00ffffff) << 2;
                        if (offset & 0x02000000)
                                offset -= 0x04000000;

                        offset += sym->st_value - loc;
                        if (offset & 3 ||
                            offset <= (s32)0xfe000000 ||
                            offset >= (s32)0x02000000) {
                                pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
                                       module->name, relindex, i, symname,
                                       ELF32_R_TYPE(rel->r_info), loc,
                                       sym->st_value);
                                return -ENOEXEC;
                        }

                        offset >>= 2;

                        *(u32 *)loc &= 0xff000000;
                        *(u32 *)loc |= offset & 0x00ffffff;
                        break;

                case R_ARM_V4BX:
                        /* Preserve Rm and the condition code. Alter
                         * other bits to re-code instruction as
                         * MOV PC,Rm.
                         */
                        *(u32 *)loc &= 0xf000000f;
                        *(u32 *)loc |= 0x01a0f000;
                        break;

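                /*
                 * R_ARM_PREL31: 31-bit place-relative offset, as used by the
                 * ARM exception-index (.ARM.exidx) entries.
                 */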
                case R_ARM_PREL31:
                        offset = *(u32 *)loc + sym->st_value - loc;
                        *(u32 *)loc = offset & 0x7fffffff;
                        break;

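                /*
                 * ARM MOVW/MOVT: the 16-bit immediate is split across the
                 * instruction as imm4 (bits 19:16) and imm12 (bits 11:0).
                 * The existing addend is decoded and sign-extended via the
                 * (x ^ 0x8000) - 0x8000 trick, the symbol value (or its top
                 * half for MOVT) is added, and the result is re-encoded.
                 */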
                case R_ARM_MOVW_ABS_NC:
                case R_ARM_MOVT_ABS:
                        offset = *(u32 *)loc;
                        offset = ((offset & 0xf0000) >> 4) | (offset & 0xfff);
                        offset = (offset ^ 0x8000) - 0x8000;

                        offset += sym->st_value;
                        if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_ABS)
                                offset >>= 16;

                        *(u32 *)loc &= 0xfff0f000;
                        *(u32 *)loc |= ((offset & 0xf000) << 4) |
                                       (offset & 0x0fff);
                        break;

#ifdef CONFIG_THUMB2_KERNEL
                case R_ARM_THM_CALL:
                case R_ARM_THM_JUMP24:
                        upper = *(u16 *)loc;
                        lower = *(u16 *)(loc + 2);

                        /*
                         * 25 bit signed address range (Thumb-2 BL and B.W
                         * instructions):
                         *   S:I1:I2:imm10:imm11:0
                         * where:
                         *   S     = upper[10]   = offset[24]
                         *   I1    = ~(J1 ^ S)   = offset[23]
                         *   I2    = ~(J2 ^ S)   = offset[22]
                         *   imm10 = upper[9:0]  = offset[21:12]
                         *   imm11 = lower[10:0] = offset[11:1]
                         *   J1    = lower[13]
                         *   J2    = lower[11]
                         */
                        sign = (upper >> 10) & 1;
                        j1 = (lower >> 13) & 1;
                        j2 = (lower >> 11) & 1;
                        offset = (sign << 24) | ((~(j1 ^ sign) & 1) << 23) |
                                 ((~(j2 ^ sign) & 1) << 22) |
                                 ((upper & 0x03ff) << 12) |
                                 ((lower & 0x07ff) << 1);
                        if (offset & 0x01000000)
                                offset -= 0x02000000;
                        offset += sym->st_value - loc;

                        /*
                         * For function symbols, only Thumb addresses are
                         * allowed (no interworking).
                         *
                         * For non-function symbols, the destination
                         * has no specific ARM/Thumb disposition, so
                         * the branch is resolved under the assumption
                         * that interworking is not required.
                         */
                        if ((ELF32_ST_TYPE(sym->st_info) == STT_FUNC &&
                                !(offset & 1)) ||
                            offset <= (s32)0xff000000 ||
                            offset >= (s32)0x01000000) {
                                pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
                                       module->name, relindex, i, symname,
                                       ELF32_R_TYPE(rel->r_info), loc,
                                       sym->st_value);
                                return -ENOEXEC;
                        }

                        sign = (offset >> 24) & 1;
                        j1 = sign ^ (~(offset >> 23) & 1);
                        j2 = sign ^ (~(offset >> 22) & 1);
                        *(u16 *)loc = (u16)((upper & 0xf800) | (sign << 10) |
                                            ((offset >> 12) & 0x03ff));
                        *(u16 *)(loc + 2) = (u16)((lower & 0xd000) |
                                                  (j1 << 13) | (j2 << 11) |
                                                  ((offset >> 1) & 0x07ff));
                        break;

                case R_ARM_THM_MOVW_ABS_NC:
                case R_ARM_THM_MOVT_ABS:
                        upper = *(u16 *)loc;
                        lower = *(u16 *)(loc + 2);

                        /*
                         * MOVT/MOVW instructions encoding in Thumb-2:
                         *
                         * i    = upper[10]
                         * imm4 = upper[3:0]
                         * imm3 = lower[14:12]
                         * imm8 = lower[7:0]
                         *
                         * imm16 = imm4:i:imm3:imm8
                         */
                        offset = ((upper & 0x000f) << 12) |
                                 ((upper & 0x0400) << 1) |
                                 ((lower & 0x7000) >> 4) | (lower & 0x00ff);
                        offset = (offset ^ 0x8000) - 0x8000;
                        offset += sym->st_value;

                        if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_ABS)
                                offset >>= 16;

                        *(u16 *)loc = (u16)((upper & 0xfbf0) |
                                            ((offset & 0xf000) >> 12) |
                                            ((offset & 0x0800) >> 1));
                        *(u16 *)(loc + 2) = (u16)((lower & 0x8f00) |
                                                  ((offset & 0x0700) << 4) |
                                                  (offset & 0x00ff));
                        break;
#endif

                default:
                        printk(KERN_ERR "%s: unknown relocation: %u\n",
                               module->name, ELF32_R_TYPE(rel->r_info));
                        return -ENOEXEC;
                }
        }
        return 0;
}

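/*
 * The ARM EABI uses REL-style relocations (addend stored in place), so
 * RELA sections are not expected in modules and are rejected outright.
 */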
int
apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
                   unsigned int symindex, unsigned int relsec, struct module *module)
{
        printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n",
               module->name);
        return -ENOEXEC;
}

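/* Pairs an .ARM.exidx unwind index section with the text section it covers. */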
struct mod_unwind_map {
        const Elf_Shdr *unw_sec;
        const Elf_Shdr *txt_sec;
};

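/*
 * Look up a section header by name via the section-header string table
 * (e_shstrndx); returns NULL if no matching section exists.
 */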
static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr,
        const Elf_Shdr *sechdrs, const char *name)
{
        const Elf_Shdr *s, *se;
        const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

        for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++)
                if (strcmp(name, secstrs + s->sh_name) == 0)
                        return s;

        return NULL;
}

extern void fixup_pv_table(const void *, unsigned long);
extern void fixup_smp(const void *, unsigned long);

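/*
 * Late per-module fixups: register an unwind table for each
 * (.ARM.exidx*, text) section pair, patch the module's .pv_table when
 * CONFIG_ARM_PATCH_PHYS_VIRT is enabled, and apply the .alt.smp.init
 * fixups when running on a uniprocessor system.
 */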
int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
                    struct module *mod)
{
        const Elf_Shdr *s = NULL;
#ifdef CONFIG_ARM_UNWIND
        const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
        const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum;
        struct mod_unwind_map maps[ARM_SEC_MAX];
        int i;

        memset(maps, 0, sizeof(maps));

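        /*
         * Match each .ARM.exidx* unwind section with the text section it
         * describes; unwind_table_add() needs both address ranges.
         */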
        for (s = sechdrs; s < sechdrs_end; s++) {
                const char *secname = secstrs + s->sh_name;

                if (!(s->sh_flags & SHF_ALLOC))
                        continue;

                if (strcmp(".ARM.exidx.init.text", secname) == 0)
                        maps[ARM_SEC_INIT].unw_sec = s;
                else if (strcmp(".ARM.exidx.devinit.text", secname) == 0)
                        maps[ARM_SEC_DEVINIT].unw_sec = s;
                else if (strcmp(".ARM.exidx", secname) == 0)
                        maps[ARM_SEC_CORE].unw_sec = s;
                else if (strcmp(".ARM.exidx.exit.text", secname) == 0)
                        maps[ARM_SEC_EXIT].unw_sec = s;
                else if (strcmp(".ARM.exidx.devexit.text", secname) == 0)
                        maps[ARM_SEC_DEVEXIT].unw_sec = s;
                else if (strcmp(".init.text", secname) == 0)
                        maps[ARM_SEC_INIT].txt_sec = s;
                else if (strcmp(".devinit.text", secname) == 0)
                        maps[ARM_SEC_DEVINIT].txt_sec = s;
                else if (strcmp(".text", secname) == 0)
                        maps[ARM_SEC_CORE].txt_sec = s;
                else if (strcmp(".exit.text", secname) == 0)
                        maps[ARM_SEC_EXIT].txt_sec = s;
                else if (strcmp(".devexit.text", secname) == 0)
                        maps[ARM_SEC_DEVEXIT].txt_sec = s;
        }

        for (i = 0; i < ARM_SEC_MAX; i++)
                if (maps[i].unw_sec && maps[i].txt_sec)
                        mod->arch.unwind[i] =
                                unwind_table_add(maps[i].unw_sec->sh_addr,
                                                 maps[i].unw_sec->sh_size,
                                                 maps[i].txt_sec->sh_addr,
                                                 maps[i].txt_sec->sh_size);
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
        s = find_mod_section(hdr, sechdrs, ".pv_table");
        if (s)
                fixup_pv_table((void *)s->sh_addr, s->sh_size);
#endif
        s = find_mod_section(hdr, sechdrs, ".alt.smp.init");
        if (s && !is_smp())
                fixup_smp((void *)s->sh_addr, s->sh_size);
        return 0;
}

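/* Remove any unwind tables registered by module_finalize() on unload. */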
void
module_arch_cleanup(struct module *mod)
{
#ifdef CONFIG_ARM_UNWIND
        int i;

        for (i = 0; i < ARM_SEC_MAX; i++)
                if (mod->arch.unwind[i])
                        unwind_table_del(mod->arch.unwind[i]);
#endif
}