GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/x86/include/asm/alternative.h
#ifndef _ASM_X86_ALTERNATIVE_H
#define _ASM_X86_ALTERNATIVE_H

#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/stringify.h>
#include <asm/asm.h>

/*
 * Alternative inline assembly for SMP.
 *
 * The LOCK_PREFIX macro defined here replaces the LOCK and
 * LOCK_PREFIX macros used everywhere in the source tree.
 *
 * SMP alternatives use the same data structures as the other
 * alternatives and the X86_FEATURE_UP flag to indicate the case of a
 * UP system running an SMP kernel. The existing apply_alternatives()
 * works fine for patching an SMP kernel for UP.
 *
 * The SMP alternative tables can be kept after boot and contain both
 * UP and SMP versions of the instructions to allow switching back to
 * SMP at runtime, when hotplugging in a new CPU, which is especially
 * useful in virtualized environments.
 *
 * The very common lock prefix is handled as a special case in a
 * separate table which is a pure address list without replacement
 * pointer and size information. That keeps the table sizes small.
 */

#ifdef CONFIG_SMP
#define LOCK_PREFIX_HERE \
		".section .smp_locks,\"a\"\n"	\
		".balign 4\n"			\
		".long 671f - .\n" /* offset */	\
		".previous\n"			\
		"671:"

#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "

#else /* ! CONFIG_SMP */
#define LOCK_PREFIX_HERE ""
#define LOCK_PREFIX ""
#endif
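
/*
 * Illustrative sketch (not part of the original header, mirrors the
 * atomic ops in <asm/atomic.h>): callers simply prepend LOCK_PREFIX to
 * an instruction; on CONFIG_SMP the address of the "lock" byte is
 * recorded in .smp_locks so it can be patched out on a UP machine:
 *
 *	static inline void atomic_inc(atomic_t *v)
 *	{
 *		asm volatile(LOCK_PREFIX "incl %0"
 *			     : "+m" (v->counter));
 *	}
 */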

struct alt_instr {
	u8 *instr;		/* original instruction */
	u8 *replacement;
	u16 cpuid;		/* cpuid bit set for replacement */
	u8  instrlen;		/* length of original instruction */
	u8  replacementlen;	/* length of new instruction, <= instrlen */
#ifdef CONFIG_X86_64
	u32 pad2;
#endif
};

extern void alternative_instructions(void);
extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
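
/*
 * Illustrative sketch (an assumption, simplified from
 * arch/x86/kernel/alternative.c): apply_alternatives() walks the table
 * and, for each entry whose CPU feature bit is set, copies the
 * replacement over the original and pads the tail with nops:
 *
 *	for (a = start; a < end; a++) {
 *		if (!boot_cpu_has(a->cpuid))
 *			continue;
 *		memcpy(insnbuf, a->replacement, a->replacementlen);
 *		add_nops(insnbuf + a->replacementlen,
 *			 a->instrlen - a->replacementlen);
 *		text_poke_early(a->instr, insnbuf, a->instrlen);
 *	}
 */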

struct module;

#ifdef CONFIG_SMP
extern void alternatives_smp_module_add(struct module *mod, char *name,
					void *locks, void *locks_end,
					void *text, void *text_end);
extern void alternatives_smp_module_del(struct module *mod);
extern void alternatives_smp_switch(int smp);
extern int alternatives_text_reserved(void *start, void *end);
extern bool skip_smp_alternatives;
#else
static inline void alternatives_smp_module_add(struct module *mod, char *name,
					       void *locks, void *locks_end,
					       void *text, void *text_end) {}
static inline void alternatives_smp_module_del(struct module *mod) {}
static inline void alternatives_smp_switch(int smp) {}
static inline int alternatives_text_reserved(void *start, void *end)
{
	return 0;
}
#endif /* CONFIG_SMP */
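
/*
 * Illustrative sketch (an assumption, modeled on module_finalize() in
 * arch/x86/kernel/module.c): the module loader registers each module's
 * .smp_locks and .text sections so its lock prefixes can be switched
 * when CPUs are hot-added or removed:
 *
 *	alternatives_smp_module_add(me, me->name,
 *				    locks->sh_addr,
 *				    locks->sh_addr + locks->sh_size,
 *				    text->sh_addr,
 *				    text->sh_addr + text->sh_size);
 */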

/* alternative assembly primitive: */
#define ALTERNATIVE(oldinstr, newinstr, feature)			\
									\
      "661:\n\t" oldinstr "\n662:\n"					\
      ".section .altinstructions,\"a\"\n"				\
      _ASM_ALIGN "\n"							\
      _ASM_PTR "661b\n"				/* label */		\
      _ASM_PTR "663f\n"				/* new instruction */	\
      " .word " __stringify(feature) "\n"	/* feature bit */	\
      " .byte 662b-661b\n"			/* sourcelen */		\
      " .byte 664f-663f\n"			/* replacementlen */	\
      ".previous\n"							\
      ".section .discard,\"aw\",@progbits\n"				\
      " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */	\
      ".previous\n"							\
      ".section .altinstr_replacement, \"ax\"\n"			\
      "663:\n\t" newinstr "\n664:\n"		/* replacement */	\
      ".previous"

/*
 * This must be included *after* the definition of ALTERNATIVE due to
 * <asm/arch_hweight.h>
 */
#include <asm/cpufeature.h>
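
/*
 * Illustrative sketch (an assumption, mirrors <asm/arch_hweight.h>):
 * raw use of ALTERNATIVE() inside an asm statement, replacing a call to
 * the software popcount routine with the POPCNT instruction when the
 * CPU supports it. POPCNT32 is the .byte-encoded popcnt instruction
 * defined in that header, and the register constraints follow its
 * calling convention:
 *
 *	unsigned int res;
 *
 *	asm (ALTERNATIVE("call __sw_hweight32", POPCNT32,
 *			 X86_FEATURE_POPCNT)
 *	     : "=a" (res)
 *	     : "D" (w));
 */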

/*
 * Alternative instructions for different CPU types or capabilities.
 *
 * This allows the use of optimized instructions even on generic binary
 * kernels.
 *
 * The length of oldinstr must be greater than or equal to the length of
 * newinstr; oldinstr can be padded with nops as needed.
 *
 * For non-barrier-like inlines, please define new variants
 * without volatile and memory clobber.
 */
#define alternative(oldinstr, newinstr, feature)			\
	asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory")
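
/*
 * Illustrative sketch (not part of the original header): the 32-bit
 * memory-barrier macros are a classic user, falling back to a locked
 * add when SSE2's mfence is unavailable:
 *
 *	#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", \
 *				 X86_FEATURE_XMM2)
 */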

/*
 * Alternative inline assembly with input.
 *
 * Peculiarities:
 * There is no memory clobber here.
 * Argument numbers start with 1.
 * It is best to use fixed-size constraints (like "r" with (%1)).
 * If you use variable-sized constraints like "m" or "g" in the
 * replacement, make sure to pad to the worst-case length.
 * An unused argument 0 is left in to keep API compatibility.
 */
#define alternative_input(oldinstr, newinstr, feature, input...)	\
	asm volatile (ALTERNATIVE(oldinstr, newinstr, feature)		\
		: : "i" (0), ## input)
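
/*
 * Illustrative sketch (an assumption, mirrors prefetch() in
 * <asm/processor.h> on 32-bit): patch an SSE prefetch into a nop-padded
 * slot when the CPU supports it; %1 refers to the first input argument:
 *
 *	alternative_input(ASM_NOP4,
 *			  "prefetchnta (%1)",
 *			  X86_FEATURE_XMM,
 *			  "r" (x));
 */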

/* Like alternative_input, but with a single output argument */
#define alternative_io(oldinstr, newinstr, feature, output, input...)	\
	asm volatile (ALTERNATIVE(oldinstr, newinstr, feature)		\
		: output : "i" (0), ## input)
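
/*
 * Illustrative sketch (an assumption, mirrors native_apic_mem_write()
 * in <asm/apic.h>): use xchg instead of a plain store on CPUs with the
 * Pentium erratum 11AP; ASM_OUTPUT2 (defined below) wraps the
 * comma-separated operand lists:
 *
 *	alternative_io("movl %0, %1", "xchgl %0, %1", X86_FEATURE_11AP,
 *		       ASM_OUTPUT2("=r" (v), "=m" (*addr)),
 *		       ASM_OUTPUT2("0" (v), "m" (*addr)));
 */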

/* Like alternative_io, but for replacing a direct call with another one. */
#define alternative_call(oldfunc, newfunc, feature, output, input...)	\
	asm volatile (ALTERNATIVE("call %P[old]", "call %P[new]", feature) \
		: output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)
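
/*
 * Illustrative sketch (an assumption, simplified from
 * copy_user_generic() in <asm/uaccess_64.h>): patch in the string-copy
 * variant on CPUs where rep movs is fast; extra clobbers ride along at
 * the end of the input list:
 *
 *	alternative_call(copy_user_generic_unrolled,
 *			 copy_user_generic_string,
 *			 X86_FEATURE_REP_GOOD,
 *			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
 *				     "=d" (len)),
 *			 "1" (to), "2" (from), "3" (len)
 *			 : "memory", "rcx", "r8", "r9", "r10", "r11");
 */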

/*
 * Use this macro if you need more than one output parameter in
 * alternative_io.
 */
#define ASM_OUTPUT2(a...) a

struct paravirt_patch_site;
#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch_site *start,
		    struct paravirt_patch_site *end);
#else
static inline void apply_paravirt(struct paravirt_patch_site *start,
				  struct paravirt_patch_site *end)
{}
#define __parainstructions	NULL
#define __parainstructions_end	NULL
#endif

extern void *text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Clear and restore the kernel write-protection flag on the local CPU.
 * Allows the kernel to edit read-only pages.
 * Side effect: any interrupt handler running between save and restore will
 * have the ability to write to read-only pages.
 *
 * Warning:
 * Code patching in the UP case is safe if NMIs and MCE handlers are stopped
 * and no thread can be preempted in the instructions being modified (no iret
 * to an invalid instruction possible) or if the instructions are changed
 * from one consistent state to another consistent state atomically.
 * More care must be taken when modifying code in the SMP case because of
 * Intel's errata. text_poke_smp() takes care of those errata, but still
 * doesn't support modifying code in NMI/MCE handlers.
 * On the local CPU you need to be protected against NMI or MCE handlers
 * seeing an inconsistent instruction while you patch.
 */
struct text_poke_param {
	void *addr;
	const void *opcode;
	size_t len;
};

extern void *text_poke(void *addr, const void *opcode, size_t len);
extern void *text_poke_smp(void *addr, const void *opcode, size_t len);
extern void text_poke_smp_batch(struct text_poke_param *params, int n);
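
/*
 * Illustrative sketch (an assumption, modeled on the optimized-kprobes
 * code in arch/x86/kernel/kprobes.c): text_poke_smp_batch() lets a
 * caller patch many sites in a single stop_machine() invocation:
 *
 *	struct text_poke_param params[MAX_OPTIMIZE_PROBES];
 *
 *	params[n].addr = op->kp.addr;
 *	params[n].opcode = insn_buf;	(a 5-byte relative jmp)
 *	params[n].len = RELATIVEJUMP_SIZE;
 *	n++;
 *	...
 *	text_poke_smp_batch(params, n);
 */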

#endif /* _ASM_X86_ALTERNATIVE_H */