GitHub Repository: torvalds/linux
Path: blob/master/tools/objtool/arch/x86/decode.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015 Josh Poimboeuf <[email protected]>
 */

#include <stdio.h>
#include <stdlib.h>

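/*
 * Reuse the kernel's x86 instruction decoder by including its sources
 * directly; the unlikely() stub lets them build as part of this
 * userspace tool.
 */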
#define unlikely(cond) (cond)
#include <asm/insn.h>
#include "../../../arch/x86/lib/inat.c"
#include "../../../arch/x86/lib/insn.c"

#define CONFIG_64BIT 1
#include <asm/nops.h>

#include <asm/orc_types.h>
#include <objtool/check.h>
#include <objtool/disas.h>
#include <objtool/elf.h>
#include <objtool/arch.h>
#include <objtool/warn.h>
#include <objtool/builtin.h>
#include <arch/elf.h>

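/*
 * The CFI_* register numbering follows the x86 instruction encoding
 * (AX=0, CX=1, DX=2, BX=3, SP=4, BP=5, SI=6, DI=7, then R8-R15), so
 * ModRM/SIB register fields can be compared directly against CFI_*
 * constants.  "ra" is a pseudo-register for the return address.
 */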
const char *arch_reg_name[CFI_NUM_REGS] = {
	"rax", "rcx", "rdx", "rbx",
	"rsp", "rbp", "rsi", "rdi",
	"r8", "r9", "r10", "r11",
	"r12", "r13", "r14", "r15",
	"ra"
};

int arch_ftrace_match(const char *name)
{
	return !strcmp(name, "__fentry__");
}

static int is_x86_64(const struct elf *elf)
{
	switch (elf->ehdr.e_machine) {
	case EM_X86_64:
		return 1;
	case EM_386:
		return 0;
	default:
		ERROR("unexpected ELF machine type %d", elf->ehdr.e_machine);
		return -1;
	}
}

bool arch_callee_saved_reg(unsigned char reg)
{
	switch (reg) {
	case CFI_BP:
	case CFI_BX:
	case CFI_R12:
	case CFI_R13:
	case CFI_R14:
	case CFI_R15:
		return true;

	case CFI_AX:
	case CFI_CX:
	case CFI_DX:
	case CFI_SI:
	case CFI_DI:
	case CFI_SP:
	case CFI_R8:
	case CFI_R9:
	case CFI_R10:
	case CFI_R11:
	case CFI_RA:
	default:
		return false;
	}
}

/* Undo the effects of __pa_symbol() if necessary */
static unsigned long phys_to_virt(unsigned long pa)
{
	s64 va = pa;

	if (va > 0)
		va &= ~(0x80000000);

	return va;
}

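/*
 * For PC-relative relocations, rebase the addend so it is relative to
 * the end of the instruction (the next-instruction address used for
 * rIP-relative addressing) rather than to the relocation site itself.
 */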
s64 arch_insn_adjusted_addend(struct instruction *insn, struct reloc *reloc)
{
	s64 addend = reloc_addend(reloc);

	if (arch_pc_relative_reloc(reloc))
		addend += insn->offset + insn->len - reloc_offset(reloc);

	return phys_to_virt(addend);
}

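/*
 * Decode forward from the start of the section to find the instruction
 * containing @offset; return its start and length.
 */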
static void scan_for_insn(struct section *sec, unsigned long offset,
			  unsigned long *insn_off, unsigned int *insn_len)
{
	unsigned long o = 0;
	struct insn insn;

	while (1) {

		insn_decode(&insn, sec->data->d_buf + o, sec_size(sec) - o,
			    INSN_MODE_64);

		if (o + insn.length > offset) {
			*insn_off = o;
			*insn_len = insn.length;
			return;
		}

		o += insn.length;
	}
}

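/*
 * R_X86_64_PLT32 only appears on direct call/jmp immediates, which
 * occupy the last four bytes of the instruction, so the adjustment to
 * the instruction end is always +4.  A PC32 relocation in a text
 * section may sit in the middle of its instruction, so scan for the
 * containing instruction to find where it actually ends.
 */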
u64 arch_adjusted_addend(struct reloc *reloc)
{
	unsigned int type = reloc_type(reloc);
	s64 addend = reloc_addend(reloc);
	unsigned long insn_off;
	unsigned int insn_len;

	if (type == R_X86_64_PLT32)
		return addend + 4;

	if (type != R_X86_64_PC32 || !is_text_sec(reloc->sec->base))
		return addend;

	scan_for_insn(reloc->sec->base, reloc_offset(reloc),
		      &insn_off, &insn_len);

	return addend + insn_off + insn_len - reloc_offset(reloc);
}

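/* Relative jumps and calls are encoded relative to the next instruction. */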
unsigned long arch_jump_destination(struct instruction *insn)
{
	return insn->offset + insn->len + insn->immediate;
}

bool arch_pc_relative_reloc(struct reloc *reloc)
{
	/*
	 * All relocation types where P (the address of the target)
	 * is included in the computation.
	 */
	switch (reloc_type(reloc)) {
	case R_X86_64_PC8:
	case R_X86_64_PC16:
	case R_X86_64_PC32:
	case R_X86_64_PC64:

	case R_X86_64_PLT32:
	case R_X86_64_GOTPC32:
	case R_X86_64_GOTPCREL:
		return true;

	default:
		break;
	}

	return false;
}

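/*
 * Allocate a stack_op, append it to the instruction's ops list and run
 * the attached block exactly once to fill it in (the for loop lets
 * "ADD_OP(op) { ... }" read like a statement).  Returns from the
 * enclosing function on allocation failure.
 */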
#define ADD_OP(op) \
	if (!(op = calloc(1, sizeof(*op)))) \
		return -1; \
	else for (*ops_list = op, ops_list = &op->next; op; op = NULL)

/*
 * Helpers to decode ModRM/SIB:
 *
 * r/m| AX  CX  DX  BX |  SP |  BP |  SI  DI |
 *    | R8  R9 R10 R11 | R12 | R13 | R14 R15 |
 * Mod+----------------+-----+-----+---------+
 *  00| [r/m]          |[SIB]|[IP+]| [r/m]   |
 *  01| [r/m + d8]     |[S+d]|   [r/m + d8]  |
 *  10| [r/m + d32]    |[S+D]|   [r/m + d32] |
 *  11|                   r/ m               |
 */

#define mod_is_mem()	(modrm_mod != 3)
#define mod_is_reg()	(modrm_mod == 3)

#define is_RIP()	((modrm_rm & 7) == CFI_BP && modrm_mod == 0)
#define have_SIB()	((modrm_rm & 7) == CFI_SP && mod_is_mem())

/*
 * Check the ModRM register. If there is a SIB byte then check with
 * the SIB base register. But if the SIB base is 5 (i.e. CFI_BP) and
 * ModRM mod is 0 then there is no base register.
 */
#define rm_is(reg) (have_SIB() ? \
		    sib_base == (reg) && sib_index == CFI_SP && \
		    (sib_base != CFI_BP || modrm_mod != 0) : \
		    modrm_rm == (reg))

#define rm_is_mem(reg)	(mod_is_mem() && !is_RIP() && rm_is(reg))
#define rm_is_reg(reg)	(mod_is_reg() && modrm_rm == (reg))

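/*
 * 0x3e is the DS segment-override byte, reused as the NOTRACK prefix
 * for indirect branches when Indirect Branch Tracking is in use.
 */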
static bool has_notrack_prefix(struct insn *insn)
{
	int i;

	for (i = 0; i < insn->prefixes.nbytes; i++) {
		if (insn->prefixes.bytes[i] == 0x3e)
			return true;
	}

	return false;
}

int arch_decode_instruction(struct objtool_file *file, const struct section *sec,
			    unsigned long offset, unsigned int maxlen,
			    struct instruction *insn)
{
	struct stack_op **ops_list = &insn->stack_ops;
	const struct elf *elf = file->elf;
	struct insn ins;
	int x86_64, ret;
	unsigned char op1, op2, op3, prefix,
		      rex = 0, rex_b = 0, rex_r = 0, rex_w = 0, rex_x = 0,
		      modrm = 0, modrm_mod = 0, modrm_rm = 0, modrm_reg = 0,
		      sib = 0, /* sib_scale = 0, */ sib_index = 0, sib_base = 0;
	struct stack_op *op = NULL;
	struct symbol *sym;
	u64 imm;

	x86_64 = is_x86_64(elf);
	if (x86_64 == -1)
		return -1;

	ret = insn_decode(&ins, sec->data->d_buf + offset, maxlen,
			  x86_64 ? INSN_MODE_64 : INSN_MODE_32);
	if (ret < 0) {
		ERROR("can't decode instruction at %s:0x%lx", sec->name, offset);
		return -1;
	}

	insn->len = ins.length;
	insn->type = INSN_OTHER;

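	/*
	 * VEX-encoded (SIMD) instructions don't affect the stack or
	 * control flow; leave them as INSN_OTHER.
	 */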
	if (ins.vex_prefix.nbytes)
		return 0;

	prefix = ins.prefixes.bytes[0];

	op1 = ins.opcode.bytes[0];
	op2 = ins.opcode.bytes[1];
	op3 = ins.opcode.bytes[2];

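	/*
	 * REX.W selects 64-bit operand size; REX.R, REX.X and REX.B
	 * extend the ModRM reg, SIB index and ModRM rm/SIB base fields
	 * to reach r8-r15.
	 */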
	if (ins.rex_prefix.nbytes) {
		rex = ins.rex_prefix.bytes[0];
		rex_w = X86_REX_W(rex) >> 3;
		rex_r = X86_REX_R(rex) >> 2;
		rex_x = X86_REX_X(rex) >> 1;
		rex_b = X86_REX_B(rex);
	}

	if (ins.modrm.nbytes) {
		modrm = ins.modrm.bytes[0];
		modrm_mod = X86_MODRM_MOD(modrm);
		modrm_reg = X86_MODRM_REG(modrm) + 8*rex_r;
		modrm_rm  = X86_MODRM_RM(modrm)  + 8*rex_b;
	}

	if (ins.sib.nbytes) {
		sib = ins.sib.bytes[0];
		/* sib_scale = X86_SIB_SCALE(sib); */
		sib_index = X86_SIB_INDEX(sib) + 8*rex_x;
		sib_base  = X86_SIB_BASE(sib)  + 8*rex_b;
	}

	switch (op1) {

	case 0x1:
	case 0x29:
		if (rex_w && rm_is_reg(CFI_SP)) {

			/* add/sub reg, %rsp */
			ADD_OP(op) {
				op->src.type = OP_SRC_ADD;
				op->src.reg = modrm_reg;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = CFI_SP;
			}
		}
		break;

	case 0x50 ... 0x57:

		/* push reg */
		ADD_OP(op) {
			op->src.type = OP_SRC_REG;
			op->src.reg = (op1 & 0x7) + 8*rex_b;
			op->dest.type = OP_DEST_PUSH;
		}

		break;

	case 0x58 ... 0x5f:

		/* pop reg */
		ADD_OP(op) {
			op->src.type = OP_SRC_POP;
			op->dest.type = OP_DEST_REG;
			op->dest.reg = (op1 & 0x7) + 8*rex_b;
		}

		break;

	case 0x68:
	case 0x6a:
		/* push immediate */
		ADD_OP(op) {
			op->src.type = OP_SRC_CONST;
			op->dest.type = OP_DEST_PUSH;
		}
		break;

	case 0x70 ... 0x7f:
		insn->type = INSN_JUMP_CONDITIONAL;
		break;

	case 0x80 ... 0x83:
		/*
		 * 1000 00sw : mod OP r/m : immediate
		 *
		 * s - sign extend immediate
		 * w - imm8 / imm32
		 *
		 * OP: 000 ADD    100 AND
		 *     001 OR     101 SUB
		 *     010 ADC    110 XOR
		 *     011 SBB    111 CMP
		 */

		/* 64bit only */
		if (!rex_w)
			break;

		/* %rsp target only */
		if (!rm_is_reg(CFI_SP))
			break;

		imm = ins.immediate.value;
		if (op1 & 2) { /* sign extend */
			if (op1 & 1) { /* imm32 */
				imm <<= 32;
				imm = (s64)imm >> 32;
			} else { /* imm8 */
				imm <<= 56;
				imm = (s64)imm >> 56;
			}
		}

		switch (modrm_reg & 7) {
		case 5:
			imm = -imm;
			fallthrough;
		case 0:
			/* add/sub imm, %rsp */
			ADD_OP(op) {
				op->src.type = OP_SRC_ADD;
				op->src.reg = CFI_SP;
				op->src.offset = imm;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = CFI_SP;
			}
			break;

		case 4:
			/* and imm, %rsp */
			ADD_OP(op) {
				op->src.type = OP_SRC_AND;
				op->src.reg = CFI_SP;
				op->src.offset = ins.immediate.value;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = CFI_SP;
			}
			break;

		default:
			/* ERROR ? */
			break;
		}

		break;

	case 0x89:
		if (!rex_w)
			break;

		if (modrm_reg == CFI_SP) {

			if (mod_is_reg()) {
				/* mov %rsp, reg */
				ADD_OP(op) {
					op->src.type = OP_SRC_REG;
					op->src.reg = CFI_SP;
					op->dest.type = OP_DEST_REG;
					op->dest.reg = modrm_rm;
				}
				break;

			} else {
				/* skip RIP relative displacement */
				if (is_RIP())
					break;

				/* skip nontrivial SIB */
				if (have_SIB()) {
					modrm_rm = sib_base;
					if (sib_index != CFI_SP)
						break;
				}

				/* mov %rsp, disp(%reg) */
				ADD_OP(op) {
					op->src.type = OP_SRC_REG;
					op->src.reg = CFI_SP;
					op->dest.type = OP_DEST_REG_INDIRECT;
					op->dest.reg = modrm_rm;
					op->dest.offset = ins.displacement.value;
				}
				break;
			}

			break;
		}

		if (rm_is_reg(CFI_SP)) {

			/* mov reg, %rsp */
			ADD_OP(op) {
				op->src.type = OP_SRC_REG;
				op->src.reg = modrm_reg;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = CFI_SP;
			}
			break;
		}

		fallthrough;
	case 0x88:
		if (!rex_w)
			break;

		if (rm_is_mem(CFI_BP)) {

			/* mov reg, disp(%rbp) */
			ADD_OP(op) {
				op->src.type = OP_SRC_REG;
				op->src.reg = modrm_reg;
				op->dest.type = OP_DEST_REG_INDIRECT;
				op->dest.reg = CFI_BP;
				op->dest.offset = ins.displacement.value;
			}
			break;
		}

		if (rm_is_mem(CFI_SP)) {

			/* mov reg, disp(%rsp) */
			ADD_OP(op) {
				op->src.type = OP_SRC_REG;
				op->src.reg = modrm_reg;
				op->dest.type = OP_DEST_REG_INDIRECT;
				op->dest.reg = CFI_SP;
				op->dest.offset = ins.displacement.value;
			}
			break;
		}

		break;

	case 0x8b:
		if (!rex_w)
			break;

		if (rm_is_mem(CFI_BP)) {

			/* mov disp(%rbp), reg */
			ADD_OP(op) {
				op->src.type = OP_SRC_REG_INDIRECT;
				op->src.reg = CFI_BP;
				op->src.offset = ins.displacement.value;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = modrm_reg;
			}
			break;
		}

		if (rm_is_mem(CFI_SP)) {

			/* mov disp(%rsp), reg */
			ADD_OP(op) {
				op->src.type = OP_SRC_REG_INDIRECT;
				op->src.reg = CFI_SP;
				op->src.offset = ins.displacement.value;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = modrm_reg;
			}
			break;
		}

		break;

	case 0x8d:
		if (mod_is_reg()) {
			WARN("invalid LEA encoding at %s:0x%lx", sec->name, offset);
			break;
		}

		/* skip non 64bit ops */
		if (!rex_w)
			break;

		/* skip nontrivial SIB */
		if (have_SIB()) {
			modrm_rm = sib_base;
			if (sib_index != CFI_SP)
				break;
		}

		/* lea disp(%rip), %dst */
		if (is_RIP()) {
			insn->type = INSN_LEA_RIP;
			break;
		}

		/* lea disp(%src), %dst */
		ADD_OP(op) {
			op->src.offset = ins.displacement.value;
			if (!op->src.offset) {
				/* lea (%src), %dst */
				op->src.type = OP_SRC_REG;
			} else {
				/* lea disp(%src), %dst */
				op->src.type = OP_SRC_ADD;
			}
			op->src.reg = modrm_rm;
			op->dest.type = OP_DEST_REG;
			op->dest.reg = modrm_reg;
		}
		break;

	case 0x8f:
		/* pop to mem */
		ADD_OP(op) {
			op->src.type = OP_SRC_POP;
			op->dest.type = OP_DEST_MEM;
		}
		break;

	case 0x90:
		if (rex_b) /* XCHG %r8, %rax */
			break;

		if (prefix == 0xf3) /* REP NOP := PAUSE */
			break;

		insn->type = INSN_NOP;
		break;

	case 0x9c:
		/* pushf */
		ADD_OP(op) {
			op->src.type = OP_SRC_CONST;
			op->dest.type = OP_DEST_PUSHF;
		}
		break;

	case 0x9d:
		/* popf */
		ADD_OP(op) {
			op->src.type = OP_SRC_POPF;
			op->dest.type = OP_DEST_MEM;
		}
		break;

	case 0x0f:

		if (op2 == 0x01) {

			switch (insn_last_prefix_id(&ins)) {
			case INAT_PFX_REPE:
			case INAT_PFX_REPNE:
				if (modrm == 0xca)
					/* eretu/erets */
					insn->type = INSN_SYSRET;
				break;
			default:
				if (modrm == 0xca)
					insn->type = INSN_CLAC;
				else if (modrm == 0xcb)
					insn->type = INSN_STAC;
				break;
			}
		} else if (op2 >= 0x80 && op2 <= 0x8f) {

			insn->type = INSN_JUMP_CONDITIONAL;

		} else if (op2 == 0x05 || op2 == 0x34) {

			/* syscall, sysenter */
			insn->type = INSN_SYSCALL;

		} else if (op2 == 0x07 || op2 == 0x35) {

			/* sysret, sysexit */
			insn->type = INSN_SYSRET;

		} else if (op2 == 0x0b || op2 == 0xb9) {

			/* ud2, ud1 */
			insn->type = INSN_BUG;

		} else if (op2 == 0x1f) {

			/* 0f 1f /0 := NOPL */
			if (modrm_reg == 0)
				insn->type = INSN_NOP;

		} else if (op2 == 0x1e) {

			if (prefix == 0xf3 && (modrm == 0xfa || modrm == 0xfb))
				insn->type = INSN_ENDBR;


		} else if (op2 == 0x38 && op3 == 0xf8) {
			if (ins.prefixes.nbytes == 1 &&
			    ins.prefixes.bytes[0] == 0xf2) {
				/* ENQCMD cannot be used in the kernel. */
				WARN("ENQCMD instruction at %s:%lx", sec->name, offset);
			}

		} else if (op2 == 0xa0 || op2 == 0xa8) {

			/* push fs/gs */
			ADD_OP(op) {
				op->src.type = OP_SRC_CONST;
				op->dest.type = OP_DEST_PUSH;
			}

		} else if (op2 == 0xa1 || op2 == 0xa9) {

			/* pop fs/gs */
			ADD_OP(op) {
				op->src.type = OP_SRC_POP;
				op->dest.type = OP_DEST_MEM;
			}
		}

		break;

	case 0xc9:
		/*
		 * leave
		 *
		 * equivalent to:
		 *   mov bp, sp
		 *   pop bp
		 */
		ADD_OP(op) {
			op->src.type = OP_SRC_REG;
			op->src.reg = CFI_BP;
			op->dest.type = OP_DEST_REG;
			op->dest.reg = CFI_SP;
		}
		ADD_OP(op) {
			op->src.type = OP_SRC_POP;
			op->dest.type = OP_DEST_REG;
			op->dest.reg = CFI_BP;
		}
		break;

	case 0xcc:
		/* int3 */
		insn->type = INSN_TRAP;
		break;

	case 0xe3:
		/* jecxz/jrcxz */
		insn->type = INSN_JUMP_CONDITIONAL;
		break;

	case 0xe9:
	case 0xeb:
		insn->type = INSN_JUMP_UNCONDITIONAL;
		break;

	case 0xc2:
	case 0xc3:
		insn->type = INSN_RETURN;
		break;

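	/*
	 * For noinstr validation, recognize .init.text code that stores a
	 * function pointer into a pv_ops[] slot ("mov $imm32, disp32(%reg)"):
	 * one relocation identifies the pv_ops slot being written, the other
	 * the function stored there, so paravirt indirect calls can be
	 * resolved later via objtool_pv_add().
	 */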
	case 0xc7: /* mov imm, r/m */
		if (!opts.noinstr)
			break;

		if (ins.length == 3+4+4 && !strncmp(sec->name, ".init.text", 10)) {
			struct reloc *immr, *disp;
			struct symbol *func;
			int idx;

			immr = find_reloc_by_dest(elf, (void *)sec, offset+3);
			disp = find_reloc_by_dest(elf, (void *)sec, offset+7);

			if (!immr || strcmp(immr->sym->name, "pv_ops"))
				break;

			idx = (reloc_addend(immr) + 8) / sizeof(void *);

			func = disp->sym;
			if (disp->sym->type == STT_SECTION)
				func = find_symbol_by_offset(disp->sym->sec, reloc_addend(disp));
			if (!func) {
				ERROR("no func for pv_ops[]");
				return -1;
			}

			objtool_pv_add(file, idx, func);
		}

		break;

	case 0xcf: /* iret */
		/*
		 * Handle sync_core(), which has an IRET to self.
		 * All other IRET are in STT_NONE entry code.
		 */
		sym = find_symbol_containing(sec, offset);
		if (sym && sym->type == STT_FUNC) {
			ADD_OP(op) {
				/* add $40, %rsp */
				op->src.type = OP_SRC_ADD;
				op->src.reg = CFI_SP;
				op->src.offset = 5*8;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = CFI_SP;
			}
			break;
		}

		fallthrough;

	case 0xca: /* retf */
	case 0xcb: /* retf */
		insn->type = INSN_SYSRET;
		break;

	case 0xd6: /* udb */
		insn->type = INSN_BUG;
		break;

	case 0xe0: /* loopne */
	case 0xe1: /* loope */
	case 0xe2: /* loop */
		insn->type = INSN_JUMP_CONDITIONAL;
		break;

	case 0xe8:
		insn->type = INSN_CALL;
		/*
		 * For the impact on the stack, a CALL behaves like
		 * a PUSH of an immediate value (the return address).
		 */
		ADD_OP(op) {
			op->src.type = OP_SRC_CONST;
			op->dest.type = OP_DEST_PUSH;
		}
		break;

	case 0xfc:
		insn->type = INSN_CLD;
		break;

	case 0xfd:
		insn->type = INSN_STD;
		break;

	case 0xff:
		if (modrm_reg == 2 || modrm_reg == 3) {

			insn->type = INSN_CALL_DYNAMIC;
			if (has_notrack_prefix(&ins))
				WARN("notrack prefix found at %s:0x%lx", sec->name, offset);

		} else if (modrm_reg == 4) {

			insn->type = INSN_JUMP_DYNAMIC;
			if (has_notrack_prefix(&ins))
				WARN("notrack prefix found at %s:0x%lx", sec->name, offset);

		} else if (modrm_reg == 5) {

			/* jmpf */
			insn->type = INSN_SYSRET;

		} else if (modrm_reg == 6) {

			/* push from mem */
			ADD_OP(op) {
				op->src.type = OP_SRC_CONST;
				op->dest.type = OP_DEST_PUSH;
			}
		}

		break;

	default:
		break;
	}

	if (ins.immediate.nbytes)
		insn->immediate = ins.immediate.value;
	else if (ins.displacement.nbytes)
		insn->immediate = ins.displacement.value;

	return 0;
}

void arch_initial_func_cfi_state(struct cfi_init_state *state)
{
	int i;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		state->regs[i].base = CFI_UNDEFINED;
		state->regs[i].offset = 0;
	}

	/* initial CFA (call frame address) */
	state->cfa.base = CFI_SP;
	state->cfa.offset = 8;

	/* initial RA (return address) */
	state->regs[CFI_RA].base = CFI_CFA;
	state->regs[CFI_RA].offset = -8;
}

const char *arch_nop_insn(int len)
{
	static const char nops[5][5] = {
		{ BYTES_NOP1 },
		{ BYTES_NOP2 },
		{ BYTES_NOP3 },
		{ BYTES_NOP4 },
		{ BYTES_NOP5 },
	};

	if (len < 1 || len > 5) {
		ERROR("invalid NOP size: %d\n", len);
		return NULL;
	}

	return nops[len-1];
}

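/*
 * Synthesized returns are padded with an INT3 (0xcc) and then NOPs; the
 * INT3 stops straight-line speculation past the RET.
 */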
#define BYTE_RET	0xC3

const char *arch_ret_insn(int len)
{
	static const char ret[5][5] = {
		{ BYTE_RET },
		{ BYTE_RET, 0xcc },
		{ BYTE_RET, 0xcc, BYTES_NOP1 },
		{ BYTE_RET, 0xcc, BYTES_NOP2 },
		{ BYTE_RET, 0xcc, BYTES_NOP3 },
	};

	if (len < 1 || len > 5) {
		ERROR("invalid RET size: %d\n", len);
		return NULL;
	}

	return ret[len-1];
}

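/*
 * Translate the ORC_REG_* base-register encoding used by unwind hint
 * annotations into objtool's CFI register numbering.
 */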
int arch_decode_hint_reg(u8 sp_reg, int *base)
{
	switch (sp_reg) {
	case ORC_REG_UNDEFINED:
		*base = CFI_UNDEFINED;
		break;
	case ORC_REG_SP:
		*base = CFI_SP;
		break;
	case ORC_REG_BP:
		*base = CFI_BP;
		break;
	case ORC_REG_SP_INDIRECT:
		*base = CFI_SP_INDIRECT;
		break;
	case ORC_REG_R10:
		*base = CFI_R10;
		break;
	case ORC_REG_R13:
		*base = CFI_R13;
		break;
	case ORC_REG_DI:
		*base = CFI_DI;
		break;
	case ORC_REG_DX:
		*base = CFI_DX;
		break;
	default:
		return -1;
	}

	return 0;
}

bool arch_is_retpoline(struct symbol *sym)
{
	return !strncmp(sym->name, "__x86_indirect_", 15) ||
	       !strncmp(sym->name, "__pi___x86_indirect_", 20);
}

bool arch_is_rethunk(struct symbol *sym)
{
	return !strcmp(sym->name, "__x86_return_thunk") ||
	       !strcmp(sym->name, "__pi___x86_return_thunk");
}

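/*
 * Return-thunk sequences whose instruction bytes are embedded inside
 * another symbol's code rather than standing alone.
 */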
bool arch_is_embedded_insn(struct symbol *sym)
{
	return !strcmp(sym->name, "retbleed_return_thunk") ||
	       !strcmp(sym->name, "srso_alias_safe_ret") ||
	       !strcmp(sym->name, "srso_safe_ret");
}

unsigned int arch_reloc_size(struct reloc *reloc)
{
	switch (reloc_type(reloc)) {
	case R_X86_64_32:
	case R_X86_64_32S:
	case R_X86_64_PC32:
	case R_X86_64_PLT32:
		return 4;
	default:
		return 8;
	}
}

bool arch_absolute_reloc(struct elf *elf, struct reloc *reloc)
{
	switch (reloc_type(reloc)) {
	case R_X86_64_32:
	case R_X86_64_32S:
	case R_X86_64_64:
		return true;
	default:
		return false;
	}
}

#ifdef DISAS

int arch_disas_info_init(struct disassemble_info *dinfo)
{
	return disas_info_init(dinfo, bfd_arch_i386,
			       bfd_mach_i386_i386, bfd_mach_x86_64,
			       "att");
}

#endif /* DISAS */