GitHub Repository: torvalds/linux
Path: blob/master/tools/perf/arch/x86/annotate/instructions.c
// SPDX-License-Identifier: GPL-2.0
/*
 * x86 instruction mnemonic table to parse disasm lines for annotate.
 * This table is searched twice - once for an exact match and again for
 * a match without a size suffix (b, w, l, q) in case of AT&T syntax.
 *
 * So this table should not have entries with the suffix unless it's
 * a completely different instruction from the one without the suffix.
 */
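/*
 * Example (illustrative, not in the original file): AT&T-syntax output
 * such as "addq $0x8,%rsp" has no exact "addq" entry; the second pass
 * strips the 'q' size suffix and matches the "add" entry below.
 */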
static struct ins x86__instructions[] = {
	{ .name = "adc", .ops = &mov_ops, },
	{ .name = "add", .ops = &mov_ops, },
	{ .name = "addsd", .ops = &mov_ops, },
	{ .name = "and", .ops = &mov_ops, },
	{ .name = "andpd", .ops = &mov_ops, },
	{ .name = "andps", .ops = &mov_ops, },
	{ .name = "bsr", .ops = &mov_ops, },
	{ .name = "bt", .ops = &mov_ops, },
	{ .name = "btr", .ops = &mov_ops, },
	{ .name = "bts", .ops = &mov_ops, },
	{ .name = "call", .ops = &call_ops, },
	{ .name = "cmovbe", .ops = &mov_ops, },
	{ .name = "cmove", .ops = &mov_ops, },
	{ .name = "cmovae", .ops = &mov_ops, },
	{ .name = "cmp", .ops = &mov_ops, },
	{ .name = "cmpxch", .ops = &mov_ops, },
	{ .name = "cmpxchg", .ops = &mov_ops, },
	{ .name = "cs", .ops = &mov_ops, },
	{ .name = "dec", .ops = &dec_ops, },
	{ .name = "divsd", .ops = &mov_ops, },
	{ .name = "divss", .ops = &mov_ops, },
	{ .name = "gs", .ops = &mov_ops, },
	{ .name = "imul", .ops = &mov_ops, },
	{ .name = "inc", .ops = &dec_ops, },
	{ .name = "ja", .ops = &jump_ops, },
	{ .name = "jae", .ops = &jump_ops, },
	{ .name = "jb", .ops = &jump_ops, },
	{ .name = "jbe", .ops = &jump_ops, },
	{ .name = "jc", .ops = &jump_ops, },
	{ .name = "jcxz", .ops = &jump_ops, },
	{ .name = "je", .ops = &jump_ops, },
	{ .name = "jecxz", .ops = &jump_ops, },
	{ .name = "jg", .ops = &jump_ops, },
	{ .name = "jge", .ops = &jump_ops, },
	{ .name = "jl", .ops = &jump_ops, },
	{ .name = "jle", .ops = &jump_ops, },
	{ .name = "jmp", .ops = &jump_ops, },
	{ .name = "jna", .ops = &jump_ops, },
	{ .name = "jnae", .ops = &jump_ops, },
	{ .name = "jnb", .ops = &jump_ops, },
	{ .name = "jnbe", .ops = &jump_ops, },
	{ .name = "jnc", .ops = &jump_ops, },
	{ .name = "jne", .ops = &jump_ops, },
	{ .name = "jng", .ops = &jump_ops, },
	{ .name = "jnge", .ops = &jump_ops, },
	{ .name = "jnl", .ops = &jump_ops, },
	{ .name = "jnle", .ops = &jump_ops, },
	{ .name = "jno", .ops = &jump_ops, },
	{ .name = "jnp", .ops = &jump_ops, },
	{ .name = "jns", .ops = &jump_ops, },
	{ .name = "jnz", .ops = &jump_ops, },
	{ .name = "jo", .ops = &jump_ops, },
	{ .name = "jp", .ops = &jump_ops, },
	{ .name = "jpe", .ops = &jump_ops, },
	{ .name = "jpo", .ops = &jump_ops, },
	{ .name = "jrcxz", .ops = &jump_ops, },
	{ .name = "js", .ops = &jump_ops, },
	{ .name = "jz", .ops = &jump_ops, },
	{ .name = "lea", .ops = &mov_ops, },
	{ .name = "lock", .ops = &lock_ops, },
	{ .name = "mov", .ops = &mov_ops, },
	{ .name = "movapd", .ops = &mov_ops, },
	{ .name = "movaps", .ops = &mov_ops, },
	{ .name = "movdqa", .ops = &mov_ops, },
	{ .name = "movdqu", .ops = &mov_ops, },
	{ .name = "movsd", .ops = &mov_ops, },
	{ .name = "movss", .ops = &mov_ops, },
	{ .name = "movsb", .ops = &mov_ops, },
	{ .name = "movsw", .ops = &mov_ops, },
	{ .name = "movsl", .ops = &mov_ops, },
	{ .name = "movupd", .ops = &mov_ops, },
	{ .name = "movups", .ops = &mov_ops, },
	{ .name = "movzb", .ops = &mov_ops, },
	{ .name = "movzw", .ops = &mov_ops, },
	{ .name = "movzl", .ops = &mov_ops, },
	{ .name = "mulsd", .ops = &mov_ops, },
	{ .name = "mulss", .ops = &mov_ops, },
	{ .name = "nop", .ops = &nop_ops, },
	{ .name = "or", .ops = &mov_ops, },
	{ .name = "orps", .ops = &mov_ops, },
	{ .name = "pand", .ops = &mov_ops, },
	{ .name = "paddq", .ops = &mov_ops, },
	{ .name = "pcmpeqb", .ops = &mov_ops, },
	{ .name = "por", .ops = &mov_ops, },
	{ .name = "rcl", .ops = &mov_ops, },
	{ .name = "ret", .ops = &ret_ops, },
	{ .name = "sbb", .ops = &mov_ops, },
	{ .name = "sete", .ops = &mov_ops, },
	{ .name = "sub", .ops = &mov_ops, },
	{ .name = "subsd", .ops = &mov_ops, },
	{ .name = "test", .ops = &mov_ops, },
	{ .name = "tzcnt", .ops = &mov_ops, },
	{ .name = "ucomisd", .ops = &mov_ops, },
	{ .name = "ucomiss", .ops = &mov_ops, },
	{ .name = "vaddsd", .ops = &mov_ops, },
	{ .name = "vandpd", .ops = &mov_ops, },
	{ .name = "vmovdqa", .ops = &mov_ops, },
	{ .name = "vmovq", .ops = &mov_ops, },
	{ .name = "vmovsd", .ops = &mov_ops, },
	{ .name = "vmulsd", .ops = &mov_ops, },
	{ .name = "vorpd", .ops = &mov_ops, },
	{ .name = "vsubsd", .ops = &mov_ops, },
	{ .name = "vucomisd", .ops = &mov_ops, },
	{ .name = "xadd", .ops = &mov_ops, },
	{ .name = "xbegin", .ops = &jump_ops, },
	{ .name = "xchg", .ops = &mov_ops, },
	{ .name = "xor", .ops = &mov_ops, },
	{ .name = "xorpd", .ops = &mov_ops, },
	{ .name = "xorps", .ops = &mov_ops, },
};
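
/*
 * Illustrative sketch only (not part of the original file): the real
 * two-pass lookup lives in perf's generic disasm code, which #includes
 * this file and sorts the table by name at init time.  It shows the
 * search strategy described in the header comment: try an exact match
 * first, then retry with a trailing AT&T size suffix (b, w, l, q)
 * stripped.  The helper names below are assumptions for illustration.
 */
static int ins__cmp_name(const void *a, const void *b)
{
	const struct ins *ia = a, *ib = b;

	return strcmp(ia->name, ib->name);
}

static struct ins *x86__ins_find(const char *name)
{
	struct ins key = { .name = name, };
	struct ins *ins;
	size_t len;
	char buf[16];

	/* Pass 1: exact mnemonic match (table sorted by name). */
	ins = bsearch(&key, x86__instructions, ARRAY_SIZE(x86__instructions),
		      sizeof(key), ins__cmp_name);
	if (ins)
		return ins;

	/* Pass 2: retry without an AT&T size suffix, e.g. "addq" -> "add". */
	len = strlen(name);
	if (len < 2 || len >= sizeof(buf) || !strchr("bwlq", name[len - 1]))
		return NULL;

	memcpy(buf, name, len - 1);
	buf[len - 1] = '\0';
	key.name = buf;

	return bsearch(&key, x86__instructions, ARRAY_SIZE(x86__instructions),
		       sizeof(key), ins__cmp_name);
}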
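
/*
 * Macro-op fusion: the CPU front end can merge a compare/ALU instruction
 * with the conditional branch that immediately follows it into a single
 * micro-op.  These helpers tell the annotate code whether the pair
 * (ins1, ins2) fuses on the current CPU; ins2 is the branch.
 */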
static bool amd__ins_is_fused(struct arch *arch, const char *ins1,
			      const char *ins2)
{
	if (strstr(ins2, "jmp"))
		return false;

	/* Family >= 15h supports cmp/test + branch fusion */
	if (arch->family >= 0x15 && (strstarts(ins1, "test") ||
	    (strstarts(ins1, "cmp") && !strstr(ins1, "xchg")))) {
		return true;
	}

	/* Family >= 19h supports some ALU + branch fusion */
	if (arch->family >= 0x19 && (strstarts(ins1, "add") ||
	    strstarts(ins1, "sub") || strstarts(ins1, "and") ||
	    strstarts(ins1, "inc") || strstarts(ins1, "dec") ||
	    strstarts(ins1, "or") || strstarts(ins1, "xor"))) {
		return true;
	}

	return false;
}

static bool intel__ins_is_fused(struct arch *arch, const char *ins1,
				const char *ins2)
{
	if (arch->family != 6 || arch->model < 0x1e || strstr(ins2, "jmp"))
		return false;

	if (arch->model == 0x1e) {
		/* Nehalem */
		if ((strstr(ins1, "cmp") && !strstr(ins1, "xchg")) ||
		     strstr(ins1, "test")) {
			return true;
		}
	} else {
		/* Newer platform */
		if ((strstr(ins1, "cmp") && !strstr(ins1, "xchg")) ||
		    strstr(ins1, "test") ||
		    strstr(ins1, "add") ||
		    strstr(ins1, "sub") ||
		    strstr(ins1, "and") ||
		    strstr(ins1, "inc") ||
		    strstr(ins1, "dec")) {
			return true;
		}
	}

	return false;
}
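
/*
 * Illustrative examples (not in the original file): with
 * arch->family == 0x19 (AMD Zen 3), ins_is_fused("cmp", "je") and
 * ins_is_fused("add", "jne") return true, while
 * ins_is_fused("cmp", "jmp") returns false because unconditional
 * jumps never fuse.
 */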

static int x86__cpuid_parse(struct arch *arch, char *cpuid)
{
	unsigned int family, model, stepping;
	int ret;

	/*
	 * cpuid = "GenuineIntel,family,model,stepping"
	 */
	ret = sscanf(cpuid, "%*[^,],%u,%u,%u", &family, &model, &stepping);
	if (ret == 3) {
		arch->family = family;
		arch->model = model;
		arch->ins_is_fused = strstarts(cpuid, "AuthenticAMD") ?
					amd__ins_is_fused :
					intel__ins_is_fused;
		return 0;
	}

	return -1;
}
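
/*
 * Example (illustrative): cpuid = "AuthenticAMD,25,1,1" parses as
 * family 25 (0x19), model 1, stepping 1, and selects amd__ins_is_fused;
 * a "GenuineIntel,..." string selects intel__ins_is_fused instead.
 */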
193
194
static int x86__annotate_init(struct arch *arch, char *cpuid)
195
{
196
int err = 0;
197
198
if (arch->initialized)
199
return 0;
200
201
if (cpuid) {
202
if (x86__cpuid_parse(arch, cpuid))
203
err = SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING;
204
}
205
arch->e_machine = EM_X86_64;
206
arch->e_flags = 0;
207
arch->initialized = true;
208
return err;
209
}

#ifdef HAVE_LIBDW_SUPPORT
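/*
 * Track the DWARF type held in each register and stack slot as
 * instructions execute.  This drives perf's data-type profiling: by
 * following mov/lea/add and friends, a memory access can later be
 * attributed to a specific variable or struct member.
 */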
static void update_insn_state_x86(struct type_state *state,
				  struct data_loc_info *dloc, Dwarf_Die *cu_die,
				  struct disasm_line *dl)
{
	struct annotated_insn_loc loc;
	struct annotated_op_loc *src = &loc.ops[INSN_OP_SOURCE];
	struct annotated_op_loc *dst = &loc.ops[INSN_OP_TARGET];
	struct type_state_reg *tsr;
	Dwarf_Die type_die;
	u32 insn_offset = dl->al.offset;
	int fbreg = dloc->fbreg;
	int fboff = 0;

	if (annotate_get_insn_location(dloc->arch, dl, &loc) < 0)
		return;

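	/*
	 * Function calls: caller-saved registers lose their tracked type
	 * (except across __fentry__, which preserves everything), and the
	 * return-value register picks up the callee's return type.
	 */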
	if (ins__is_call(&dl->ins)) {
		struct symbol *func = dl->ops.target.sym;

		if (func == NULL)
			return;

		/* __fentry__ will preserve all registers */
		if (!strcmp(func->name, "__fentry__"))
			return;

		pr_debug_dtp("call [%x] %s\n", insn_offset, func->name);

		/* Otherwise invalidate caller-saved registers after call */
		for (unsigned i = 0; i < ARRAY_SIZE(state->regs); i++) {
			if (state->regs[i].caller_saved)
				state->regs[i].ok = false;
		}

		/* Update register with the return type (if any) */
		if (die_find_func_rettype(cu_die, func->name, &type_die)) {
			tsr = &state->regs[state->ret_reg];
			tsr->type = type_die;
			tsr->kind = TSR_KIND_TYPE;
			tsr->offset = 0;
			tsr->ok = true;

			pr_debug_dtp("call [%x] return -> reg%d",
				     insn_offset, state->ret_reg);
			pr_debug_type_name(&type_die, tsr->kind);
		}
		return;
	}

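	/*
	 * "add": advance a tracked pointer's offset, adjust a tracked
	 * constant, or combine a per-cpu base with a constant to form a
	 * typed per-cpu pointer.
	 */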
if (!strncmp(dl->ins.name, "add", 3)) {
262
u64 imm_value = -1ULL;
263
int offset;
264
const char *var_name = NULL;
265
struct map_symbol *ms = dloc->ms;
266
u64 ip = ms->sym->start + dl->al.offset;
267
268
if (!has_reg_type(state, dst->reg1))
269
return;
270
271
tsr = &state->regs[dst->reg1];
272
tsr->copied_from = -1;
273
274
if (src->imm)
275
imm_value = src->offset;
276
else if (has_reg_type(state, src->reg1) &&
277
state->regs[src->reg1].kind == TSR_KIND_CONST)
278
imm_value = state->regs[src->reg1].imm_value;
279
else if (src->reg1 == DWARF_REG_PC) {
280
u64 var_addr = annotate_calc_pcrel(dloc->ms, ip,
281
src->offset, dl);
282
283
if (get_global_var_info(dloc, var_addr,
284
&var_name, &offset) &&
285
!strcmp(var_name, "this_cpu_off") &&
286
tsr->kind == TSR_KIND_CONST) {
287
tsr->kind = TSR_KIND_PERCPU_BASE;
288
tsr->offset = 0;
289
tsr->ok = true;
290
imm_value = tsr->imm_value;
291
}
292
}
293
else
294
return;
295
296
/* Ignore add to non-pointer or non-const types */
297
if (tsr->kind == TSR_KIND_POINTER ||
298
(dwarf_tag(&tsr->type) == DW_TAG_pointer_type &&
299
src->reg1 != DWARF_REG_PC && tsr->kind == TSR_KIND_TYPE && !dst->mem_ref)) {
300
tsr->offset += imm_value;
301
pr_debug_dtp("add [%x] offset %#"PRIx64" to reg%d",
302
insn_offset, imm_value, dst->reg1);
303
pr_debug_type_name(&tsr->type, tsr->kind);
304
}
305
306
if (tsr->kind == TSR_KIND_CONST)
307
tsr->imm_value += imm_value;
308
309
if (tsr->kind != TSR_KIND_PERCPU_BASE)
310
return;
311
312
if (get_global_var_type(cu_die, dloc, ip, imm_value, &offset,
313
&type_die) && offset == 0) {
314
/*
315
* This is not a pointer type, but it should be treated
316
* as a pointer.
317
*/
318
tsr->type = type_die;
319
tsr->kind = TSR_KIND_PERCPU_POINTER;
320
tsr->offset = 0;
321
tsr->ok = true;
322
323
pr_debug_dtp("add [%x] percpu %#"PRIx64" -> reg%d",
324
insn_offset, imm_value, dst->reg1);
325
pr_debug_type_name(&tsr->type, tsr->kind);
326
}
327
return;
328
}
329
330
if (!strncmp(dl->ins.name, "sub", 3)) {
331
u64 imm_value = -1ULL;
332
333
if (!has_reg_type(state, dst->reg1))
334
return;
335
336
tsr = &state->regs[dst->reg1];
337
tsr->copied_from = -1;
338
339
if (src->imm)
340
imm_value = src->offset;
341
else if (has_reg_type(state, src->reg1) &&
342
state->regs[src->reg1].kind == TSR_KIND_CONST)
343
imm_value = state->regs[src->reg1].imm_value;
344
345
if (tsr->kind == TSR_KIND_POINTER ||
346
(dwarf_tag(&tsr->type) == DW_TAG_pointer_type &&
347
src->reg1 != DWARF_REG_PC && tsr->kind == TSR_KIND_TYPE && !dst->mem_ref)) {
348
tsr->offset -= imm_value;
349
pr_debug_dtp("sub [%x] offset %#"PRIx64" to reg%d",
350
insn_offset, imm_value, dst->reg1);
351
pr_debug_type_name(&tsr->type, tsr->kind);
352
}
353
354
if (tsr->kind == TSR_KIND_CONST)
355
tsr->imm_value -= imm_value;
356
357
return;
358
}
359
360
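	/*
	 * "lea": the destination register becomes a pointer, either to a
	 * stack variable (frame/stack-pointer based) or to a member of a
	 * type that an existing register already points to.
	 */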
if (!strncmp(dl->ins.name, "lea", 3)) {
361
int sreg = src->reg1;
362
struct type_state_reg src_tsr;
363
364
if (!has_reg_type(state, sreg) ||
365
!has_reg_type(state, dst->reg1) ||
366
!src->mem_ref)
367
return;
368
369
src_tsr = state->regs[sreg];
370
tsr = &state->regs[dst->reg1];
371
372
tsr->copied_from = -1;
373
tsr->ok = false;
374
375
/* Case 1: Based on stack pointer or frame pointer */
376
if (sreg == fbreg || sreg == state->stack_reg) {
377
struct type_state_stack *stack;
378
int offset = src->offset - fboff;
379
380
stack = find_stack_state(state, offset);
381
if (!stack)
382
return;
383
384
tsr->type = stack->type;
385
tsr->kind = TSR_KIND_POINTER;
386
tsr->offset = offset - stack->offset;
387
tsr->ok = true;
388
389
if (sreg == fbreg) {
390
pr_debug_dtp("lea [%x] address of -%#x(stack) -> reg%d",
391
insn_offset, -src->offset, dst->reg1);
392
} else {
393
pr_debug_dtp("lea [%x] address of %#x(reg%d) -> reg%d",
394
insn_offset, src->offset, sreg, dst->reg1);
395
}
396
397
pr_debug_type_name(&tsr->type, tsr->kind);
398
}
399
/* Case 2: Based on a register holding a typed pointer */
400
else if (src_tsr.ok && (src_tsr.kind == TSR_KIND_POINTER ||
401
(dwarf_tag(&src_tsr.type) == DW_TAG_pointer_type &&
402
src_tsr.kind == TSR_KIND_TYPE))) {
403
404
if (src_tsr.kind == TSR_KIND_TYPE &&
405
__die_get_real_type(&state->regs[sreg].type, &type_die) == NULL)
406
return;
407
408
if (src_tsr.kind == TSR_KIND_POINTER)
409
type_die = state->regs[sreg].type;
410
411
/* Check if the target type has a member at the new offset */
412
if (die_get_member_type(&type_die,
413
src->offset + src_tsr.offset, &type_die) == NULL)
414
return;
415
416
tsr->type = src_tsr.type;
417
tsr->kind = src_tsr.kind;
418
tsr->offset = src->offset + src_tsr.offset;
419
tsr->ok = true;
420
421
pr_debug_dtp("lea [%x] address of %s%#x(reg%d) -> reg%d",
422
insn_offset, src->offset < 0 ? "-" : "",
423
abs(src->offset), sreg, dst->reg1);
424
425
pr_debug_type_name(&tsr->type, tsr->kind);
426
}
427
return;
428
}
429
430
	/* Invalidate register states for other ops which may change pointers */
	if (has_reg_type(state, dst->reg1) && !dst->mem_ref &&
	    dwarf_tag(&state->regs[dst->reg1].type) == DW_TAG_pointer_type) {
		if (!strncmp(dl->ins.name, "imul", 4) || !strncmp(dl->ins.name, "mul", 3) ||
		    !strncmp(dl->ins.name, "idiv", 4) || !strncmp(dl->ins.name, "div", 3) ||
		    !strncmp(dl->ins.name, "shl", 3) || !strncmp(dl->ins.name, "shr", 3) ||
		    !strncmp(dl->ins.name, "sar", 3) || !strncmp(dl->ins.name, "and", 3) ||
		    !strncmp(dl->ins.name, "or", 2) || !strncmp(dl->ins.name, "neg", 3) ||
		    !strncmp(dl->ins.name, "inc", 3) || !strncmp(dl->ins.name, "dec", 3)) {
			pr_debug_dtp("%s [%x] invalidate reg%d\n",
				     dl->ins.name, insn_offset, dst->reg1);
			state->regs[dst->reg1].ok = false;
			state->regs[dst->reg1].copied_from = -1;
			return;
		}

		if (!strncmp(dl->ins.name, "xor", 3) && dst->reg1 == src->reg1) {
			/* xor reg, reg clears the register */
			pr_debug_dtp("xor [%x] clear reg%d\n",
				     insn_offset, dst->reg1);

			state->regs[dst->reg1].kind = TSR_KIND_CONST;
			state->regs[dst->reg1].imm_value = 0;
			state->regs[dst->reg1].ok = true;
			state->regs[dst->reg1].copied_from = -1;
			return;
		}
	}

	if (strncmp(dl->ins.name, "mov", 3))
		return;

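	/*
	 * The frame base rule can change over the function (e.g. in the
	 * prologue/epilogue), so re-evaluate the CFA at this instruction
	 * before decoding stack-relative mov operands.
	 */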
	if (dloc->fb_cfa) {
		u64 ip = dloc->ms->sym->start + dl->al.offset;
		u64 pc = map__rip_2objdump(dloc->ms->map, ip);

		if (die_get_cfa(dloc->di->dbg, pc, &fbreg, &fboff) < 0)
			fbreg = -1;
	}

	/* Case 1. register to register or segment:offset to register transfers */
	if (!src->mem_ref && !dst->mem_ref) {
		if (!has_reg_type(state, dst->reg1))
			return;

		tsr = &state->regs[dst->reg1];
		tsr->copied_from = -1;

		if (dso__kernel(map__dso(dloc->ms->map)) &&
		    src->segment == INSN_SEG_X86_GS && src->imm) {
			u64 ip = dloc->ms->sym->start + dl->al.offset;
			u64 var_addr;
			int offset;

			/*
			 * In kernel, %gs points to a per-cpu region for the
			 * current CPU. Access with a constant offset should
			 * be treated as a global variable access.
			 */
			var_addr = src->offset;

			if (var_addr == 40) {
				tsr->kind = TSR_KIND_CANARY;
				tsr->offset = 0;
				tsr->ok = true;

				pr_debug_dtp("mov [%x] stack canary -> reg%d\n",
					     insn_offset, dst->reg1);
				return;
			}

			if (!get_global_var_type(cu_die, dloc, ip, var_addr,
						 &offset, &type_die) ||
			    !die_get_member_type(&type_die, offset, &type_die)) {
				tsr->ok = false;
				return;
			}

			tsr->type = type_die;
			tsr->kind = TSR_KIND_TYPE;
			tsr->offset = 0;
			tsr->ok = true;

			pr_debug_dtp("mov [%x] this-cpu addr=%#"PRIx64" -> reg%d",
				     insn_offset, var_addr, dst->reg1);
			pr_debug_type_name(&tsr->type, tsr->kind);
			return;
		}

		if (src->imm) {
			tsr->kind = TSR_KIND_CONST;
			tsr->imm_value = src->offset;
			tsr->offset = 0;
			tsr->ok = true;

			pr_debug_dtp("mov [%x] imm=%#"PRIx64" -> reg%d\n",
				     insn_offset, tsr->imm_value, dst->reg1);
			return;
		}

		if (!has_reg_type(state, src->reg1) ||
		    !state->regs[src->reg1].ok) {
			tsr->ok = false;
			return;
		}

		tsr->type = state->regs[src->reg1].type;
		tsr->kind = state->regs[src->reg1].kind;
		tsr->imm_value = state->regs[src->reg1].imm_value;
		tsr->offset = state->regs[src->reg1].offset;
		tsr->ok = true;

		/* To copy back the variable type later (hopefully) */
		if (tsr->kind == TSR_KIND_TYPE || tsr->kind == TSR_KIND_POINTER)
			tsr->copied_from = src->reg1;

		pr_debug_dtp("mov [%x] reg%d -> reg%d",
			     insn_offset, src->reg1, dst->reg1);
		pr_debug_type_name(&tsr->type, tsr->kind);
	}
	/* Case 2. memory to register transfers */
	if (src->mem_ref && !dst->mem_ref) {
		int sreg = src->reg1;

		if (!has_reg_type(state, dst->reg1))
			return;

		tsr = &state->regs[dst->reg1];
		tsr->copied_from = -1;

retry:
		/* Check stack variables with offset */
		if (sreg == fbreg || sreg == state->stack_reg) {
			struct type_state_stack *stack;
			int offset = src->offset - fboff;

			stack = find_stack_state(state, offset);
			if (stack == NULL) {
				tsr->ok = false;
				return;
			} else if (!stack->compound) {
				tsr->type = stack->type;
				tsr->kind = stack->kind;
				tsr->offset = stack->ptr_offset;
				tsr->ok = true;
			} else if (die_get_member_type(&stack->type,
						       offset - stack->offset,
						       &type_die)) {
				tsr->type = type_die;
				tsr->kind = TSR_KIND_TYPE;
				tsr->offset = 0;
				tsr->ok = true;
			} else {
				tsr->ok = false;
				return;
			}

			if (sreg == fbreg) {
				pr_debug_dtp("mov [%x] -%#x(stack) -> reg%d",
					     insn_offset, -offset, dst->reg1);
			} else {
				pr_debug_dtp("mov [%x] %#x(reg%d) -> reg%d",
					     insn_offset, offset, sreg, dst->reg1);
			}
			pr_debug_type_name(&tsr->type, tsr->kind);
		}
		/* And then dereference the pointer if it has one */
		else if (has_reg_type(state, sreg) && state->regs[sreg].ok &&
			 state->regs[sreg].kind == TSR_KIND_TYPE &&
			 die_deref_ptr_type(&state->regs[sreg].type,
					    src->offset + state->regs[sreg].offset, &type_die)) {
			tsr->type = type_die;
			tsr->kind = TSR_KIND_TYPE;
			tsr->offset = 0;
			tsr->ok = true;

			pr_debug_dtp("mov [%x] %#x(reg%d) -> reg%d",
				     insn_offset, src->offset, sreg, dst->reg1);
			pr_debug_type_name(&tsr->type, tsr->kind);
		}
		/* Handle dereference of TSR_KIND_POINTER registers */
		else if (has_reg_type(state, sreg) && state->regs[sreg].ok &&
			 state->regs[sreg].kind == TSR_KIND_POINTER &&
			 die_get_member_type(&state->regs[sreg].type,
					     src->offset + state->regs[sreg].offset, &type_die)) {
			tsr->type = state->regs[sreg].type;
			tsr->kind = TSR_KIND_TYPE;
			tsr->offset = src->offset + state->regs[sreg].offset;
			tsr->ok = true;

			pr_debug_dtp("mov [%x] addr %#x(reg%d) -> reg%d",
				     insn_offset, src->offset, sreg, dst->reg1);
			pr_debug_type_name(&tsr->type, tsr->kind);
		}
		/* Or check if it's a global variable */
		else if (sreg == DWARF_REG_PC) {
			struct map_symbol *ms = dloc->ms;
			u64 ip = ms->sym->start + dl->al.offset;
			u64 addr;
			int offset;

			addr = annotate_calc_pcrel(ms, ip, src->offset, dl);

			if (!get_global_var_type(cu_die, dloc, ip, addr, &offset,
						 &type_die) ||
			    !die_get_member_type(&type_die, offset, &type_die)) {
				tsr->ok = false;
				return;
			}

			tsr->type = type_die;
			tsr->kind = TSR_KIND_TYPE;
			tsr->offset = 0;
			tsr->ok = true;

			pr_debug_dtp("mov [%x] global addr=%"PRIx64" -> reg%d",
				     insn_offset, addr, dst->reg1);
			pr_debug_type_name(&type_die, tsr->kind);
		}
		/* And check percpu access with base register */
		else if (has_reg_type(state, sreg) &&
			 state->regs[sreg].kind == TSR_KIND_PERCPU_BASE) {
			u64 ip = dloc->ms->sym->start + dl->al.offset;
			u64 var_addr = src->offset;
			int offset;

			if (src->multi_regs) {
				int reg2 = (sreg == src->reg1) ? src->reg2 : src->reg1;

				if (has_reg_type(state, reg2) && state->regs[reg2].ok &&
				    state->regs[reg2].kind == TSR_KIND_CONST)
					var_addr += state->regs[reg2].imm_value;
			}

			/*
			 * In kernel, %gs points to a per-cpu region for the
			 * current CPU. Access with a constant offset should
			 * be treated as a global variable access.
			 */
			if (get_global_var_type(cu_die, dloc, ip, var_addr,
						&offset, &type_die) &&
			    die_get_member_type(&type_die, offset, &type_die)) {
				tsr->type = type_die;
				tsr->kind = TSR_KIND_TYPE;
				tsr->offset = 0;
				tsr->ok = true;

				if (src->multi_regs) {
					pr_debug_dtp("mov [%x] percpu %#x(reg%d,reg%d) -> reg%d",
						     insn_offset, src->offset, src->reg1,
						     src->reg2, dst->reg1);
				} else {
					pr_debug_dtp("mov [%x] percpu %#x(reg%d) -> reg%d",
						     insn_offset, src->offset, sreg, dst->reg1);
				}
				pr_debug_type_name(&tsr->type, tsr->kind);
			} else {
				tsr->ok = false;
			}
		}
		/* And then dereference the calculated pointer if it has one */
		else if (has_reg_type(state, sreg) && state->regs[sreg].ok &&
			 state->regs[sreg].kind == TSR_KIND_PERCPU_POINTER &&
			 die_get_member_type(&state->regs[sreg].type,
					     src->offset, &type_die)) {
			tsr->type = type_die;
			tsr->kind = TSR_KIND_TYPE;
			tsr->offset = 0;
			tsr->ok = true;

			pr_debug_dtp("mov [%x] pointer %#x(reg%d) -> reg%d",
				     insn_offset, src->offset, sreg, dst->reg1);
			pr_debug_type_name(&tsr->type, tsr->kind);
		}
		/* Or try another register if any */
		else if (src->multi_regs && sreg == src->reg1 &&
			 src->reg1 != src->reg2) {
			sreg = src->reg2;
			goto retry;
		}
		else {
			int offset;
			const char *var_name = NULL;

			/* it might be per-cpu variable (in kernel) access */
			if (src->offset < 0) {
				if (get_global_var_info(dloc, (s64)src->offset,
							&var_name, &offset) &&
				    !strcmp(var_name, "__per_cpu_offset")) {
					tsr->kind = TSR_KIND_PERCPU_BASE;
					tsr->offset = 0;
					tsr->ok = true;

					pr_debug_dtp("mov [%x] percpu base reg%d\n",
						     insn_offset, dst->reg1);
					return;
				}
			}

			tsr->ok = false;
		}
	}
	/* Case 3. register to memory transfers */
	if (!src->mem_ref && dst->mem_ref) {
		if (!has_reg_type(state, src->reg1) ||
		    !state->regs[src->reg1].ok)
			return;

		/* Check stack variables with offset */
		if (dst->reg1 == fbreg || dst->reg1 == state->stack_reg) {
			struct type_state_stack *stack;
			int offset = dst->offset - fboff;

			tsr = &state->regs[src->reg1];

			stack = find_stack_state(state, offset);
			if (stack) {
				/*
				 * The source register is likely to hold a type
				 * of member if it's a compound type. Do not
				 * update the stack variable type since we can
				 * get the member type later by using the
				 * die_get_member_type().
				 */
				if (!stack->compound)
					set_stack_state(stack, offset, tsr->kind,
							&tsr->type, tsr->offset);
			} else {
				findnew_stack_state(state, offset, tsr->kind,
						    &tsr->type, tsr->offset);
			}

			if (dst->reg1 == fbreg) {
				pr_debug_dtp("mov [%x] reg%d -> -%#x(stack)",
					     insn_offset, src->reg1, -offset);
			} else {
				pr_debug_dtp("mov [%x] reg%d -> %#x(reg%d)",
					     insn_offset, src->reg1, offset, dst->reg1);
			}
			if (tsr->offset != 0) {
				pr_debug_dtp(" reg%d offset %#x ->",
					     src->reg1, tsr->offset);
			}

			pr_debug_type_name(&tsr->type, tsr->kind);
		}
		/*
		 * Ignore other transfers since it'd set a value in a struct
		 * and won't change the type.
		 */
	}
	/* Case 4. memory to memory transfers (not handled for now) */
}
#endif