/* SPDX-License-Identifier: GPL-2.0 */

#include <linux/export.h>
#include <linux/stringify.h>
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/nospec-branch.h>
#include <asm/unwind_hints.h>
#include <asm/percpu.h>
#include <asm/frame.h>
#include <asm/nops.h>

        .section .text..__x86.indirect_thunk


/*
 * POLINE builds the ROP half of a retpoline: the CALL pushes the address
 * of .Ldo_rop_\@ and the MOV then overwrites that return address on the
 * stack with the target in \reg, so the RET supplied by the user of this
 * macro architecturally branches to the register target. Speculation down
 * the RSB prediction (the instruction right after the CALL) lands on the
 * INT3 and is contained there.
 */
.macro POLINE reg
        ANNOTATE_INTRA_FUNCTION_CALL
        call    .Ldo_rop_\@
        int3
.Ldo_rop_\@:
        mov     %\reg, (%_ASM_SP)
        UNWIND_HINT_FUNC
.endm

.macro RETPOLINE reg
        POLINE \reg
        RET
.endm

.macro THUNK reg

        .align RETPOLINE_THUNK_SIZE
SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)
        UNWIND_HINT_UNDEFINED
        ANNOTATE_NOENDBR

        ALTERNATIVE_2 __stringify(RETPOLINE \reg), \
                      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg; int3), X86_FEATURE_RETPOLINE_LFENCE, \
                      __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), ALT_NOT(X86_FEATURE_RETPOLINE)
SYM_PIC_ALIAS(__x86_indirect_thunk_\reg)

.endm

/*
 * Despite being an assembler file we can't just use .irp here
 * because __KSYM_DEPS__ only uses the C preprocessor and would
 * only see one instance of "__x86_indirect_thunk_\reg" rather
 * than one per register with the correct names. So we do it
 * the simple and nasty way...
 *
 * Worse, you can only have a single EXPORT_SYMBOL per line,
 * and CPP can't insert newlines, so we have to repeat everything
 * at least twice.
 */

#define __EXPORT_THUNK(sym)     _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym)

        .align RETPOLINE_THUNK_SIZE
SYM_CODE_START(__x86_indirect_thunk_array)

#define GEN(reg) THUNK reg
#include <asm/GEN-for-each-reg.h>
#undef GEN

        .align RETPOLINE_THUNK_SIZE
SYM_CODE_END(__x86_indirect_thunk_array)

#define GEN(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg)
#include <asm/GEN-for-each-reg.h>
#undef GEN

#ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
.macro CALL_THUNK reg
        .align RETPOLINE_THUNK_SIZE

SYM_INNER_LABEL(__x86_indirect_call_thunk_\reg, SYM_L_GLOBAL)
        UNWIND_HINT_UNDEFINED
        ANNOTATE_NOENDBR

        CALL_DEPTH_ACCOUNT
        POLINE \reg
        ANNOTATE_UNRET_SAFE
        ret
        int3
.endm

        .align RETPOLINE_THUNK_SIZE
SYM_CODE_START(__x86_indirect_call_thunk_array)

#define GEN(reg) CALL_THUNK reg
#include <asm/GEN-for-each-reg.h>
#undef GEN

        .align RETPOLINE_THUNK_SIZE
SYM_CODE_END(__x86_indirect_call_thunk_array)

#define GEN(reg) __EXPORT_THUNK(__x86_indirect_call_thunk_ ## reg)
#include <asm/GEN-for-each-reg.h>
#undef GEN

.macro JUMP_THUNK reg
        .align RETPOLINE_THUNK_SIZE

SYM_INNER_LABEL(__x86_indirect_jump_thunk_\reg, SYM_L_GLOBAL)
        UNWIND_HINT_UNDEFINED
        ANNOTATE_NOENDBR
        POLINE \reg
        ANNOTATE_UNRET_SAFE
        ret
        int3
.endm

        .align RETPOLINE_THUNK_SIZE
SYM_CODE_START(__x86_indirect_jump_thunk_array)

#define GEN(reg) JUMP_THUNK reg
#include <asm/GEN-for-each-reg.h>
#undef GEN

        .align RETPOLINE_THUNK_SIZE
SYM_CODE_END(__x86_indirect_jump_thunk_array)

#define GEN(reg) __EXPORT_THUNK(__x86_indirect_jump_thunk_ ## reg)
#include <asm/GEN-for-each-reg.h>
#undef GEN
#endif

#ifdef CONFIG_MITIGATION_RETHUNK

/*
 * Be careful here: the __x86_return_thunk label cannot really be
 * removed because in some configurations and toolchains, the JMP
 * __x86_return_thunk the compiler issues is either a short one or the
 * compiler doesn't use relocations for same-section JMPs and that
 * breaks the returns detection logic in apply_returns() and in objtool.
 */
        .section .text..__x86.return_thunk
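/*
 * A sketch of the code the compiler emits against this file (the two
 * transformed instructions below are illustrative, not assembled here):
 * with retpolines and return thunks configured in,
 *
 *      call    *%rax           becomes         call    __x86_indirect_thunk_rax
 *      ret                     becomes         jmp     __x86_return_thunk
 *
 * so every indirect branch and every return funnels through a thunk,
 * which apply_retpolines()/apply_returns() and the alternatives
 * machinery can then rewrite in place at boot.
 */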
#ifdef CONFIG_MITIGATION_SRSO

/*
 * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at
 * special addresses:
 *
 * - srso_alias_untrain_ret() is 2M aligned
 * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14
 *   and 20 in its virtual address are set (while those bits in the
 *   srso_alias_untrain_ret() function are cleared).
 *
 * This guarantees that those two addresses will alias in the branch
 * target buffer of Zen3/4 generations, causing any potentially
 * poisoned entries at that BTB slot to be evicted.
 *
 * As a result, srso_alias_safe_ret() becomes a safe return.
 */
        .pushsection .text..__x86.rethunk_untrain
SYM_CODE_START_NOALIGN(srso_alias_untrain_ret)
        UNWIND_HINT_FUNC
        ANNOTATE_NOENDBR
        ASM_NOP2
        lfence
        jmp srso_alias_return_thunk
SYM_FUNC_END(srso_alias_untrain_ret)
__EXPORT_THUNK(srso_alias_untrain_ret)
        .popsection

        .pushsection .text..__x86.rethunk_safe
SYM_CODE_START_NOALIGN(srso_alias_safe_ret)
        lea 8(%_ASM_SP), %_ASM_SP
        UNWIND_HINT_FUNC
        ANNOTATE_UNRET_SAFE
        ret
        int3
SYM_FUNC_END(srso_alias_safe_ret)

SYM_CODE_START_NOALIGN(srso_alias_return_thunk)
        UNWIND_HINT_FUNC
        ANNOTATE_NOENDBR
        call srso_alias_safe_ret
        ud2
SYM_CODE_END(srso_alias_return_thunk)
        .popsection
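/*
 * Worked example of the aliasing constraint above, with purely
 * illustrative addresses: were srso_alias_untrain_ret placed at
 * 0xffffffff82000000 (2M aligned, so bits 2/8/14/20 clear), then
 * srso_alias_safe_ret would have to sit at 0xffffffff82104104, the
 * same address with bits 2, 8, 14 and 20 set, since
 *
 *      (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20) == 0x104104
 *
 * The linker script (arch/x86/kernel/vmlinux.lds.S) asserts at build
 * time that the two symbols differ in exactly those bits.
 */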
/*
 * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
 * below. On kernel entry, srso_untrain_ret() is executed which is a
 *
 *      movabs $0xccccc30824648d48,%rax
 *
 * and when the return thunk executes the inner label srso_safe_ret()
 * later, it is a stack manipulation and a RET which is mispredicted and
 * thus a "safe" one to use.
 */
        .align 64
        .skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc
SYM_CODE_START_LOCAL_NOALIGN(srso_untrain_ret)
        ANNOTATE_NOENDBR
        .byte 0x48, 0xb8

/*
 * This forces the function return instruction to speculate into a trap
 * (UD2 in srso_return_thunk() below). This RET will then mispredict
 * and execution will continue at the return site read from the top of
 * the stack.
 */
SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
        lea 8(%_ASM_SP), %_ASM_SP
        ret
        int3
        int3
        /* end of movabs */
        lfence
        call srso_safe_ret
        ud2
SYM_CODE_END(srso_safe_ret)
SYM_FUNC_END(srso_untrain_ret)

SYM_CODE_START(srso_return_thunk)
        UNWIND_HINT_FUNC
        ANNOTATE_NOENDBR
        call srso_safe_ret
        ud2
SYM_CODE_END(srso_return_thunk)

#define JMP_SRSO_UNTRAIN_RET "jmp srso_untrain_ret"
#else /* !CONFIG_MITIGATION_SRSO */
/* Dummy for the alternative in CALL_UNTRAIN_RET. */
SYM_CODE_START(srso_alias_untrain_ret)
        ANNOTATE_UNRET_SAFE
        ANNOTATE_NOENDBR
        ret
        int3
SYM_FUNC_END(srso_alias_untrain_ret)
__EXPORT_THUNK(srso_alias_untrain_ret)
#define JMP_SRSO_UNTRAIN_RET "ud2"
#endif /* CONFIG_MITIGATION_SRSO */

#ifdef CONFIG_MITIGATION_UNRET_ENTRY

/*
 * Some generic notes on the untraining sequences:
 *
 * They are interchangeable when it comes to flushing potentially wrong
 * RET predictions from the BTB.
 *
 * The SRSO Zen1/2 (MOVABS) untraining sequence is longer than the
 * Retbleed sequence because the return sequence done there
 * (srso_safe_ret()) is longer and the return sequence must fully nest
 * (end before) the untraining sequence. Therefore, the untraining
 * sequence must fully overlap the return sequence.
 *
 * Regarding alignment - the instructions which need to be untrained,
 * must all start at a cacheline boundary for Zen1/2 generations. That
 * is, instruction sequences starting at srso_safe_ret() and
 * the respective instruction sequences at retbleed_return_thunk()
 * must start at a cacheline boundary.
 */

/*
 * Safety details here pertain to the AMD Zen{1,2} microarchitecture:
 * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for
 *    alignment within the BTB.
 * 2) The instruction at retbleed_untrain_ret must contain, and not
 *    end with, the 0xc3 byte of the RET.
 * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread
 *    from re-poisoning the BTB prediction.
 */
        .align 64
        .skip 64 - (retbleed_return_thunk - retbleed_untrain_ret), 0xcc
SYM_CODE_START_LOCAL_NOALIGN(retbleed_untrain_ret)
        ANNOTATE_NOENDBR
        /*
         * As executed from retbleed_untrain_ret, this is:
         *
         *   TEST $0xcc, %bl
         *   LFENCE
         *   JMP retbleed_return_thunk
         *
         * Executing the TEST instruction has a side effect of evicting any BTB
         * prediction (potentially attacker controlled) attached to the RET, as
         * retbleed_return_thunk + 1 isn't an instruction boundary at the moment.
         */
        .byte   0xf6

        /*
         * As executed from retbleed_return_thunk, this is a plain RET.
         *
         * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8.
         *
         * We subsequently jump backwards and architecturally execute the RET.
         * This creates a correct BTB prediction (type=ret), but in the
         * meantime we suffer Straight Line Speculation (because the type was
         * no branch) which is halted by the INT3.
         *
         * With SMT enabled and STIBP active, a sibling thread cannot poison
         * RET's prediction to a type of its choice, but can evict the
         * prediction due to competitive sharing. If the prediction is
         * evicted, retbleed_return_thunk will suffer Straight Line Speculation
         * which will be contained safely by the INT3.
         */
SYM_INNER_LABEL(retbleed_return_thunk, SYM_L_GLOBAL)
        ret
        int3
SYM_CODE_END(retbleed_return_thunk)

        /*
         * Ensure the TEST decoding / BTB invalidation is complete.
         */
        lfence

        /*
         * Jump back and execute the RET in the middle of the TEST instruction.
         * INT3 is for SLS protection.
         */
        jmp retbleed_return_thunk
        int3
SYM_FUNC_END(retbleed_untrain_ret)
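/*
 * Byte-level view of the overlap built above (illustrative only,
 * showing how one byte stream decodes two ways depending on the entry
 * point):
 *
 *      retbleed_untrain_ret:    f6 c3 cc       TEST $0xcc, %bl
 *      retbleed_return_thunk:      c3          RET  (the TEST's ModRM byte)
 *                                     cc       INT3 (the TEST's imm8 byte)
 */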
#define JMP_RETBLEED_UNTRAIN_RET "jmp retbleed_untrain_ret"
#else /* !CONFIG_MITIGATION_UNRET_ENTRY */
#define JMP_RETBLEED_UNTRAIN_RET "ud2"
#endif /* CONFIG_MITIGATION_UNRET_ENTRY */

#if defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO)

SYM_FUNC_START(entry_untrain_ret)
        ANNOTATE_NOENDBR
        ALTERNATIVE JMP_RETBLEED_UNTRAIN_RET, JMP_SRSO_UNTRAIN_RET, X86_FEATURE_SRSO
SYM_FUNC_END(entry_untrain_ret)
__EXPORT_THUNK(entry_untrain_ret)

#endif /* CONFIG_MITIGATION_UNRET_ENTRY || CONFIG_MITIGATION_SRSO */

#ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING

        .align 64
SYM_FUNC_START(call_depth_return_thunk)
        ANNOTATE_NOENDBR
        /*
         * Keep the hotpath in a 16byte I-fetch for the non-debug
         * case.
         */
        CALL_THUNKS_DEBUG_INC_RETS
        shlq    $5, PER_CPU_VAR(__x86_call_depth)
        /*
         * Each CALL accounts depth via CALL_DEPTH_ACCOUNT; the SHL here
         * undoes one level per RET. Non-zero means tracked depth
         * remains: take the fast path and just return. Zero means the
         * RSB may underflow, so fall through and refill it with the 16
         * stuffing calls below.
         */
        jz      1f
        ANNOTATE_UNRET_SAFE
        ret
        int3
1:
        CALL_THUNKS_DEBUG_INC_STUFFS
        .rept   16
        ANNOTATE_INTRA_FUNCTION_CALL
        call    2f
        int3
2:
        .endr
        /* Pop the 16 return addresses pushed by the stuffing calls. */
        add     $(8*16), %rsp

        CREDIT_CALL_DEPTH

        ANNOTATE_UNRET_SAFE
        ret
        int3
SYM_FUNC_END(call_depth_return_thunk)

#endif /* CONFIG_MITIGATION_CALL_DEPTH_TRACKING */

#ifdef CONFIG_MITIGATION_ITS

.macro ITS_THUNK reg

/*
 * If CFI paranoid is used then the ITS thunk starts with opcodes (0xea; jne 1b)
 * that complete the fineibt_paranoid caller sequence.
 */
1:      .byte 0xea
SYM_INNER_LABEL(__x86_indirect_paranoid_thunk_\reg, SYM_L_GLOBAL)
        UNWIND_HINT_UNDEFINED
        ANNOTATE_NOENDBR
        jne 1b
SYM_INNER_LABEL(__x86_indirect_its_thunk_\reg, SYM_L_GLOBAL)
        UNWIND_HINT_UNDEFINED
        ANNOTATE_NOENDBR
        ANNOTATE_RETPOLINE_SAFE
        jmp *%\reg
        int3
        .align 32, 0xcc         /* fill to the end of the line */
        .skip 32 - (__x86_indirect_its_thunk_\reg - 1b), 0xcc /* skip to the next upper half */
.endm

/* ITS mitigation requires thunks be aligned to upper half of cacheline */
        .align 64, 0xcc
        .skip 29, 0xcc

#define GEN(reg) ITS_THUNK reg
#include <asm/GEN-for-each-reg.h>
#undef GEN

        .align 64, 0xcc
SYM_FUNC_ALIAS(__x86_indirect_its_thunk_array, __x86_indirect_its_thunk_rax)
SYM_CODE_END(__x86_indirect_its_thunk_array)

        .align 64, 0xcc
        .skip 32, 0xcc
SYM_CODE_START(its_return_thunk)
        UNWIND_HINT_FUNC
        ANNOTATE_NOENDBR
        ANNOTATE_UNRET_SAFE
        ret
        int3
SYM_CODE_END(its_return_thunk)
EXPORT_SYMBOL(its_return_thunk)

#endif /* CONFIG_MITIGATION_ITS */

/*
 * This function name is magical and is used by -mfunction-return=thunk-extern
 * for the compiler to generate JMPs to it.
 *
 * This code is only used during kernel boot or module init. All
 * 'JMP __x86_return_thunk' sites are changed to something else by
 * apply_returns().
 *
 * The ALTERNATIVE below adds a really loud warning to catch the case
 * where the insufficient default return thunk ends up getting used for
 * whatever reason like miscompilation or failure of
 * objtool/alternatives/etc to patch all the return sites.
 */
SYM_CODE_START(__x86_return_thunk)
        UNWIND_HINT_FUNC
        ANNOTATE_NOENDBR
#if defined(CONFIG_MITIGATION_UNRET_ENTRY) || \
    defined(CONFIG_MITIGATION_SRSO) || \
    defined(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)
        ALTERNATIVE __stringify(ANNOTATE_UNRET_SAFE; ret), \
                    "jmp warn_thunk_thunk", X86_FEATURE_ALWAYS
#else
        ANNOTATE_UNRET_SAFE
        ret
#endif
        int3
SYM_CODE_END(__x86_return_thunk)
SYM_PIC_ALIAS(__x86_return_thunk)
EXPORT_SYMBOL(__x86_return_thunk)

#endif /* CONFIG_MITIGATION_RETHUNK */