Path: blob/master/arch/x86/include/asm/alternative.h
#ifndef _ASM_X86_ALTERNATIVE_H
#define _ASM_X86_ALTERNATIVE_H

#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/stringify.h>
#include <asm/asm.h>

/*
 * Alternative inline assembly for SMP.
 *
 * The LOCK_PREFIX macro defined here replaces the LOCK and
 * LOCK_PREFIX macros used everywhere in the source tree.
 *
 * SMP alternatives use the same data structures as the other
 * alternatives and the X86_FEATURE_UP flag to indicate the case of a
 * UP system running an SMP kernel. The existing apply_alternatives()
 * works fine for patching an SMP kernel for UP.
 *
 * The SMP alternative tables can be kept after boot and contain both
 * UP and SMP versions of the instructions to allow switching back to
 * SMP at runtime, when hotplugging in a new CPU, which is especially
 * useful in virtualized environments.
 *
 * The very common lock prefix is handled as a special case in a
 * separate table, which is a pure address list without replacement
 * pointer and size information. That keeps the table sizes small.
 */

#ifdef CONFIG_SMP
#define LOCK_PREFIX_HERE \
		".section .smp_locks,\"a\"\n"	\
		".balign 4\n"			\
		".long 671f - .\n" /* offset */	\
		".previous\n"			\
		"671:"

#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "

#else /* ! CONFIG_SMP */
#define LOCK_PREFIX_HERE ""
#define LOCK_PREFIX ""
#endif

struct alt_instr {
	u8 *instr;		/* original instruction */
	u8 *replacement;
	u16 cpuid;		/* cpuid bit set for replacement */
	u8  instrlen;		/* length of original instruction */
	u8  replacementlen;	/* length of new instruction, <= instrlen */
#ifdef CONFIG_X86_64
	u32 pad2;
#endif
};

extern void alternative_instructions(void);
extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);

struct module;

#ifdef CONFIG_SMP
extern void alternatives_smp_module_add(struct module *mod, char *name,
					void *locks, void *locks_end,
					void *text, void *text_end);
extern void alternatives_smp_module_del(struct module *mod);
extern void alternatives_smp_switch(int smp);
extern int alternatives_text_reserved(void *start, void *end);
extern bool skip_smp_alternatives;
#else
static inline void alternatives_smp_module_add(struct module *mod, char *name,
					       void *locks, void *locks_end,
					       void *text, void *text_end) {}
static inline void alternatives_smp_module_del(struct module *mod) {}
static inline void alternatives_smp_switch(int smp) {}
static inline int alternatives_text_reserved(void *start, void *end)
{
	return 0;
}
#endif /* CONFIG_SMP */

/* alternative assembly primitive: */
#define ALTERNATIVE(oldinstr, newinstr, feature)			\
									\
      "661:\n\t" oldinstr "\n662:\n"					\
      ".section .altinstructions,\"a\"\n"				\
      _ASM_ALIGN "\n"							\
      _ASM_PTR "661b\n"				/* label */		\
      _ASM_PTR "663f\n"				/* new instruction */	\
      " .word " __stringify(feature) "\n"	/* feature bit */	\
      " .byte 662b-661b\n"			/* sourcelen */		\
      " .byte 664f-663f\n"			/* replacementlen */	\
      ".previous\n"							\
      ".section .discard,\"aw\",@progbits\n"				\
      " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */	\
      ".previous\n"							\
      ".section .altinstr_replacement, \"ax\"\n"			\
      "663:\n\t" newinstr "\n664:\n"		/* replacement */	\
      ".previous"

/*
 * This must be included *after* the definition of ALTERNATIVE due to
 * <asm/arch_hweight.h>
 */
#include <asm/cpufeature.h>

/*
 * Alternative instructions for different CPU types or capabilities.
 *
 * This allows the use of optimized instructions even on generic binary
 * kernels.
 *
 * The length of oldinstr must be greater than or equal to the length
 * of newinstr; oldinstr can be padded with nops as needed.
 *
 * For non-barrier-like inlines please define new variants
 * without volatile and the memory clobber.
 */
#define alternative(oldinstr, newinstr, feature)			\
	asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory")

/*
 * Alternative inline assembly with input.
 *
 * Peculiarities:
 *   No memory clobber here.
 *   Argument numbers start with 1.
 *   It is best to use constraints that are fixed size (like (%1) ... "r").
 *   If you use variable-sized constraints like "m" or "g" in the
 *   replacement, make sure to pad to the worst-case length.
 *   An unused argument 0 is left in place to keep API compatibility.
 */
#define alternative_input(oldinstr, newinstr, feature, input...)	\
	asm volatile (ALTERNATIVE(oldinstr, newinstr, feature)		\
		: : "i" (0), ## input)

/* Like alternative_input, but with a single output argument */
#define alternative_io(oldinstr, newinstr, feature, output, input...)	\
	asm volatile (ALTERNATIVE(oldinstr, newinstr, feature)		\
		: output : "i" (0), ## input)

/* Like alternative_io, but for replacing a direct call with another one. */
#define alternative_call(oldfunc, newfunc, feature, output, input...)	\
	asm volatile (ALTERNATIVE("call %P[old]", "call %P[new]", feature) \
		: output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)

/*
 * Use this macro if you need more than one output parameter
 * in alternative_io.
 */
#define ASM_OUTPUT2(a...) a

struct paravirt_patch_site;
#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch_site *start,
		    struct paravirt_patch_site *end);
#else
static inline void apply_paravirt(struct paravirt_patch_site *start,
				  struct paravirt_patch_site *end)
{}
#define __parainstructions	NULL
#define __parainstructions_end	NULL
#endif

extern void *text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Clear and restore the kernel write-protection flag on the local CPU.
 * Allows the kernel to edit read-only pages.
 * Side effect: any interrupt handler running between save and restore will
 * have the ability to write to read-only pages.
 *
 * Warning:
 * Code patching in the UP case is safe if NMIs and MCE handlers are stopped
 * and no thread can be preempted in the instructions being modified (no iret
 * to an invalid instruction possible), or if the instructions are changed
 * from one consistent state to another consistent state atomically.
 * More care must be taken when modifying code in the SMP case because of
 * Intel's errata. text_poke_smp() takes care of that erratum, but it still
 * does not support modifying code used by NMI/MCE handlers.
 * On the local CPU you need to be protected against NMI or MCE handlers
 * seeing an inconsistent instruction while you patch.
 */
struct text_poke_param {
	void *addr;
	const void *opcode;
	size_t len;
};

extern void *text_poke(void *addr, const void *opcode, size_t len);
extern void *text_poke_smp(void *addr, const void *opcode, size_t len);
extern void text_poke_smp_batch(struct text_poke_param *params, int n);

#endif /* _ASM_X86_ALTERNATIVE_H */
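
/*
 * Editor's usage sketch -- not part of the original header. It shows how
 * the macros above are typically used by callers (compare mb() on 32-bit
 * kernels in <asm/system.h> and prefetch() in <asm/processor.h>). The
 * function names here are hypothetical and the block is compiled out.
 */
#if 0
/*
 * alternative(): the oldinstr (a locked add, a full barrier on any CPU)
 * is patched into an mfence at boot on CPUs that set X86_FEATURE_XMM2.
 * mfence is shorter than the locked add, so the replacementlen <=
 * instrlen rule documented above holds.
 */
static inline void example_mb(void)
{
	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2);
}

/*
 * alternative_input(): operands are referenced starting at %1 because
 * the dummy "i" (0) argument kept for API compatibility occupies %0.
 * A fixed-size "r" constraint is used, as the comment above advises.
 * The oldinstr chosen here is illustrative only.
 */
static inline void example_prefetch(const void *x)
{
	alternative_input("prefetcht0 (%1)",
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}
#endif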