GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/x86/kernel/alternative.c
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/vsyscall.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>
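
/*
 * MAX_PATCH_LEN bounds the on-stack buffer (insnbuf) that
 * apply_alternatives() and apply_paravirt() assemble patches into;
 * per-site lengths are checked against it before anything is copied.
 */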
#define MAX_PATCH_LEN (255-1)

#ifdef CONFIG_HOTPLUG_CPU
static int smp_alt_once;

static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
__setup("smp-alt-boot", bootonly);
#else
#define smp_alt_once 1
#endif

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#ifdef CONFIG_PARAVIRT
static int __initdata_or_module noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
	noreplace_paravirt = 1;
	return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif
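
/*
 * Summary of the command-line knobs above: "smp-alt-boot" makes SMP
 * alternative patching happen only once at boot, "debug-alternative"
 * enables the DPRINTK output below, and "noreplace-smp" /
 * "noreplace-paravirt" disable the SMP lock-prefix and paravirt
 * patching paths respectively.
 */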

#define DPRINTK(fmt, args...) if (debug_alternative) \
	printk(KERN_DEBUG fmt, args)

/*
 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
 * that correspond to that nop. Getting from one nop to the next, we
 * add to the array the offset that is equal to the sum of all sizes of
 * nops preceding the one we are after.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
 */
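
/*
 * For example, with the offsets used below, intel_nops[5] points at
 * &intelnops[1 + 2 + 3 + 4], i.e. at the first byte of GENERIC_NOP5,
 * so *_nops[len] is always the start of a len-byte nop sequence.
 */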
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
	GENERIC_NOP1,
	GENERIC_NOP2,
	GENERIC_NOP3,
	GENERIC_NOP4,
	GENERIC_NOP5,
	GENERIC_NOP6,
	GENERIC_NOP7,
	GENERIC_NOP8,
	GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
	K8_NOP1,
	K8_NOP2,
	K8_NOP3,
	K8_NOP4,
	K8_NOP5,
	K8_NOP6,
	K8_NOP7,
	K8_NOP8,
	K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
	K7_NOP1,
	K7_NOP2,
	K7_NOP3,
	K7_NOP4,
	K7_NOP5,
	K7_NOP6,
	K7_NOP7,
	K7_NOP8,
	K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef P6_NOP1
static const unsigned char __initconst_or_module p6nops[] =
{
	P6_NOP1,
	P6_NOP2,
	P6_NOP3,
	P6_NOP4,
	P6_NOP5,
	P6_NOP6,
	P6_NOP7,
	P6_NOP8,
	P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif

void __init arch_init_ideal_nops(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * Due to a decoder implementation quirk, some
		 * specific Intel CPUs actually perform better with
		 * the "k8_nops" than with the SDM-recommended NOPs.
		 */
		if (boot_cpu_data.x86 == 6 &&
		    boot_cpu_data.x86_model >= 0x0f &&
		    boot_cpu_data.x86_model != 0x1c &&
		    boot_cpu_data.x86_model != 0x26 &&
		    boot_cpu_data.x86_model != 0x27 &&
		    boot_cpu_data.x86_model < 0x30) {
			ideal_nops = k8_nops;
		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
			ideal_nops = p6_nops;
		} else {
#ifdef CONFIG_X86_64
			ideal_nops = k8_nops;
#else
			ideal_nops = intel_nops;
#endif
		}
		break;

	default:
#ifdef CONFIG_X86_64
		ideal_nops = k8_nops;
#else
		if (boot_cpu_has(X86_FEATURE_K8))
			ideal_nops = k8_nops;
		else if (boot_cpu_has(X86_FEATURE_K7))
			ideal_nops = k7_nops;
		else
			ideal_nops = intel_nops;
#endif
	}
}
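
/*
 * Example of the selection above: Intel family 6, models 0x0f-0x2f (with
 * a few excluded models) use k8_nops because of the decoder quirk; other
 * Intel parts with NOPL use p6_nops; non-Intel vendors get k8_nops on
 * 64-bit, or the K8/K7/generic tables on 32-bit depending on CPU features.
 */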

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
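
/*
 * Illustration: with ASM_NOP_MAX == 8, add_nops(buf, 11) emits one 8-byte
 * nop from ideal_nops[8] followed by one 3-byte nop from ideal_nops[3].
 */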

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
extern char __vsyscall_0;
void *text_poke_early(void *addr, const void *opcode, size_t len);

/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self-modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   Tough. Make sure you disable such features by hand. */

void __init_or_module apply_alternatives(struct alt_instr *start,
					 struct alt_instr *end)
{
	struct alt_instr *a;
	u8 insnbuf[MAX_PATCH_LEN];

	DPRINTK("%s: alt table %p -> %p\n", __func__, start, end);
	/*
	 * The scan order should be from start to end. An alternative
	 * scanned later can overwrite one scanned earlier.
	 * Some kernel functions (e.g. memcpy, memset, etc) rely on
	 * this order when patching code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		u8 *instr = a->instr;
		BUG_ON(a->replacementlen > a->instrlen);
		BUG_ON(a->instrlen > sizeof(insnbuf));
		BUG_ON(a->cpuid >= NCAPINTS*32);
		if (!boot_cpu_has(a->cpuid))
			continue;
#ifdef CONFIG_X86_64
		/* vsyscall code is not mapped yet. resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
			DPRINTK("%s: vsyscall fixup: %p => %p\n",
				__func__, a->instr, instr);
		}
#endif
		memcpy(insnbuf, a->replacement, a->replacementlen);
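		/*
		 * 0xe8 is a near CALL with a rel32 operand: the replacement
		 * bytes were laid out at a->replacement but will execute at
		 * a->instr, so rebase the displacement by the difference.
		 */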
		if (*insnbuf == 0xe8 && a->replacementlen == 5)
			*(s32 *)(insnbuf + 1) += a->replacement - a->instr;
		add_nops(insnbuf + a->replacementlen,
			 a->instrlen - a->replacementlen);
		text_poke_early(instr, insnbuf, a->instrlen);
	}
}

#ifdef CONFIG_SMP
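
/*
 * On SMP kernels the location of every emitted "lock" prefix is recorded
 * in the __smp_locks section as a 32-bit offset relative to that entry.
 * While only one CPU is online, the 0xf0 lock prefix is rewritten to a
 * 0x3e DS segment-override prefix (a harmless one-byte filler), and is
 * rewritten back if another CPU comes online.
 */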

static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	};
	mutex_unlock(&text_mutex);
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	if (noreplace_smp)
		return;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	};
	mutex_unlock(&text_mutex);
}

struct smp_alt_module {
	/* owning module (NULL for the core kernel) */
	struct module *mod;
	char *name;

	/* ptrs to lock prefixes */
	const s32 *locks;
	const s32 *locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8 *text;
	u8 *text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_MUTEX(smp_alt);
static int smp_mode = 1;	/* protected by smp_alt */

void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text, void *text_end)
{
	struct smp_alt_module *smp;

	if (noreplace_smp)
		return;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod = mod;
	smp->name = name;
	smp->locks = locks;
	smp->locks_end = locks_end;
	smp->text = text;
	smp->text_end = text_end;
	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
		__func__, smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	mutex_lock(&smp_alt);
	list_add_tail(&smp->next, &smp_alt_modules);
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	mutex_unlock(&smp_alt);
}

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	if (smp_alt_once || noreplace_smp)
		return;

	mutex_lock(&smp_alt);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		mutex_unlock(&smp_alt);
		DPRINTK("%s: %s\n", __func__, item->name);
		kfree(item);
		return;
	}
	mutex_unlock(&smp_alt);
}

bool skip_smp_alternatives;
void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;

#ifdef CONFIG_LOCKDEP
	/*
	 * Older binutils section handling bug prevented
	 * alternatives-replacement from working reliably.
	 *
	 * If this still occurs then you should see a hang
	 * or crash shortly after this line:
	 */
	printk("lockdep: fixing up alternatives.\n");
#endif

	if (noreplace_smp || smp_alt_once || skip_smp_alternatives)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	mutex_lock(&smp_alt);

	/*
	 * Avoid unnecessary switches because they force JIT-based VMs to
	 * throw away all cached translations, which can be quite costly.
	 */
	if (smp == smp_mode) {
		/* nothing */
	} else if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	smp_mode = smp;
	mutex_unlock(&smp_alt);
}

/* Return 1 if the address range is reserved for smp-alternatives */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif

#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	if (noreplace_paravirt)
		return;

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke_early(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif /* CONFIG_PARAVIRT */

void __init alternative_instructions(void)
{
	/* The patching is not fully atomic, so try to avoid local
	   interruptions that might execute the code being patched.
	   Other CPUs are not running. */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during
	 * code patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
			set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);

			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
	} else {
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);

		/* Only switch to UP mode if we don't immediately boot others */
		if (num_present_cpus() == 1 || setup_max_cpus <= 1)
			alternatives_smp_switch(0);
	}
#endif
	apply_paravirt(__parainstructions, __parainstructions_end);

	if (smp_alt_once)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);

	restart_nmi();
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI
 * or MCE handlers seeing an inconsistent instruction while you patch.
 */
void *__init_or_module text_poke_early(void *addr, const void *opcode,
				       size_t len)
{
	unsigned long flags;
	local_irq_save(flags);
	memcpy(addr, opcode, len);
	sync_core();
	local_irq_restore(flags);
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Note: Must be called under text_mutex.
 */
void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
{
	unsigned long flags;
	char *vaddr;
	struct page *pages[2];
	int i;

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	BUG_ON(!pages[0]);
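	/*
	 * Write through a temporary fixmap mapping of the page(s) that
	 * contain addr, rather than through the regular kernel text
	 * mapping, which may be read-only.
	 */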
	local_irq_save(flags);
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
	if (pages[1])
		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
	clear_fixmap(FIX_TEXT_POKE0);
	if (pages[1])
		clear_fixmap(FIX_TEXT_POKE1);
	local_flush_tlb();
	sync_core();
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	for (i = 0; i < len; i++)
		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
	local_irq_restore(flags);
	return addr;
}

/*
 * Cross-modifying kernel text with stop_machine().
 * This code originally comes from immediate value.
 */
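/*
 * stop_machine_first starts at 1, so exactly one CPU (the first one to
 * decrement it to zero inside stop_machine_text_poke()) performs the
 * pokes; every other CPU spins until wrote_text is set, then all of them
 * flush their icache range and execute a serializing instruction.
 */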
static atomic_t stop_machine_first;
static int wrote_text;

struct text_poke_params {
	struct text_poke_param *params;
	int nparams;
};

static int __kprobes stop_machine_text_poke(void *data)
{
	struct text_poke_params *tpp = data;
	struct text_poke_param *p;
	int i;

	if (atomic_dec_and_test(&stop_machine_first)) {
		for (i = 0; i < tpp->nparams; i++) {
			p = &tpp->params[i];
			text_poke(p->addr, p->opcode, p->len);
		}
		smp_wmb();	/* Make sure other cpus see that this has run */
		wrote_text = 1;
	} else {
		while (!wrote_text)
			cpu_relax();
		smp_mb();	/* Load wrote_text before following execution */
	}

	for (i = 0; i < tpp->nparams; i++) {
		p = &tpp->params[i];
		flush_icache_range((unsigned long)p->addr,
				   (unsigned long)p->addr + p->len);
	}
	/*
	 * Intel Architecture Software Developer's Manual section 7.1.3
	 * specifies that a core serializing instruction such as "cpuid"
	 * should be executed on _each_ core before the new instruction is
	 * made visible.
	 */
	sync_core();
	return 0;
}

/**
 * text_poke_smp - Update instructions on a live kernel on SMP
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Modify multi-byte instructions by using stop_machine() on SMP. This allows
 * a user to poke/set multi-byte text on SMP. Only code that is not executed
 * from NMI/MCE context may be modified, since stop_machine() does _not_
 * protect code against NMI and MCE.
 *
 * Note: Must be called under get_online_cpus() and text_mutex.
 */
void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
{
	struct text_poke_params tpp;
	struct text_poke_param p;

	p.addr = addr;
	p.opcode = opcode;
	p.len = len;
	tpp.params = &p;
	tpp.nparams = 1;
	atomic_set(&stop_machine_first, 1);
	wrote_text = 0;
	/* Use __stop_machine() because the caller already got online_cpus. */
	__stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask);
	return addr;
}

/**
 * text_poke_smp_batch - Update instructions on a live kernel on SMP
 * @params: an array of text_poke parameters
 * @n: the number of elements in params.
 *
 * Modify multi-byte instructions by using stop_machine() on SMP. Since
 * stop_machine() is a heavy operation, it is better to aggregate text_poke
 * requests and perform them in a single call where possible.
 *
 * Note: Must be called under get_online_cpus() and text_mutex.
 */
void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n)
{
	struct text_poke_params tpp = {.params = params, .nparams = n};

	atomic_set(&stop_machine_first, 1);
	wrote_text = 0;
	__stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
}
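
/*
 * Typical usage (sketch): a caller fills an array of text_poke_param
 * entries (.addr, .opcode, .len) and, while holding text_mutex under
 * get_online_cpus(), hands the whole array to text_poke_smp_batch() so
 * that all pokes share a single stop_machine() invocation.
 */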