GitHub Repository: torvalds/linux
Path: blob/master/tools/bpf/bpftool/cfg.c

// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */

#include <linux/list.h>
#include <stdlib.h>
#include <string.h>

#include "cfg.h"
#include "main.h"
#include "xlated_dumper.h"
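
/*
 * Build a control flow graph (CFG) from the translated (xlated)
 * instructions of a BPF program and dump it in DOT format.
 *
 * The data structures form a hierarchy: a cfg holds a sorted list of
 * functions (struct func_node), each function holds a sorted list of
 * basic blocks (struct bb_node), and each block keeps lists of its
 * predecessor and successor edges (struct edge_node).
 */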

struct cfg {
        struct list_head funcs;
        int func_num;
};

struct func_node {
        struct list_head l;
        struct list_head bbs;
        struct bpf_insn *start;
        struct bpf_insn *end;
        int idx;
        int bb_num;
};

struct bb_node {
        struct list_head l;
        struct list_head e_prevs;
        struct list_head e_succs;
        struct bpf_insn *head;
        struct bpf_insn *tail;
        int idx;
};

#define EDGE_FLAG_EMPTY        0x0
#define EDGE_FLAG_FALLTHROUGH  0x1
#define EDGE_FLAG_JUMP         0x2
struct edge_node {
        struct list_head l;
        struct bb_node *src;
        struct bb_node *dst;
        int flags;
};
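
/*
 * Two synthetic blocks frame every function: ENTRY at index 0 and EXIT
 * at index 1 (see func_add_special_bb() below).  The helper macros
 * navigate the sorted function and block lists.
 */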

#define ENTRY_BLOCK_INDEX   0
#define EXIT_BLOCK_INDEX    1
#define NUM_FIXED_BLOCKS    2
#define func_prev(func)     list_prev_entry(func, l)
#define func_next(func)     list_next_entry(func, l)
#define bb_prev(bb)         list_prev_entry(bb, l)
#define bb_next(bb)         list_next_entry(bb, l)
#define entry_bb(func)      func_first_bb(func)
#define exit_bb(func)       func_last_bb(func)
#define cfg_first_func(cfg) \
        list_first_entry(&cfg->funcs, struct func_node, l)
#define cfg_last_func(cfg) \
        list_last_entry(&cfg->funcs, struct func_node, l)
#define func_first_bb(func) \
        list_first_entry(&func->bbs, struct bb_node, l)
#define func_last_bb(func) \
        list_last_entry(&func->bbs, struct bb_node, l)
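
/*
 * Insert a function node for the function starting at @insn, keeping
 * the list sorted by start address.  If a node for that address already
 * exists, it is returned instead of allocating a duplicate.
 */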

static struct func_node *cfg_append_func(struct cfg *cfg, struct bpf_insn *insn)
{
        struct func_node *new_func, *func;

        list_for_each_entry(func, &cfg->funcs, l) {
                if (func->start == insn)
                        return func;
                else if (func->start > insn)
                        break;
        }

        func = func_prev(func);
        new_func = calloc(1, sizeof(*new_func));
        if (!new_func) {
                p_err("OOM when allocating FUNC node");
                return NULL;
        }
        new_func->start = insn;
        new_func->idx = cfg->func_num;
        list_add(&new_func->l, &func->l);
        cfg->func_num++;

        return new_func;
}
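
/*
 * Insert a basic block whose first instruction is @insn, keeping the
 * function's block list sorted by head address.  An existing block with
 * the same head is reused.
 */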

static struct bb_node *func_append_bb(struct func_node *func,
                                      struct bpf_insn *insn)
{
        struct bb_node *new_bb, *bb;

        list_for_each_entry(bb, &func->bbs, l) {
                if (bb->head == insn)
                        return bb;
                else if (bb->head > insn)
                        break;
        }

        bb = bb_prev(bb);
        new_bb = calloc(1, sizeof(*new_bb));
        if (!new_bb) {
                p_err("OOM when allocating BB node");
                return NULL;
        }
        new_bb->head = insn;
        INIT_LIST_HEAD(&new_bb->e_prevs);
        INIT_LIST_HEAD(&new_bb->e_succs);
        list_add(&new_bb->l, &bb->l);

        return new_bb;
}

static struct bb_node *func_insert_dummy_bb(struct list_head *after)
{
        struct bb_node *bb;

        bb = calloc(1, sizeof(*bb));
        if (!bb) {
                p_err("OOM when allocating BB node");
                return NULL;
        }

        INIT_LIST_HEAD(&bb->e_prevs);
        INIT_LIST_HEAD(&bb->e_succs);
        list_add(&bb->l, after);

        return bb;
}
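
/*
 * Split the instruction stream into functions: the first function
 * starts at @cur, and every BPF-to-BPF (pseudo) call target starts
 * another one.  Each function's end pointer is then set to the
 * instruction before the next function's start.  Returns true on
 * allocation failure.
 */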

static bool cfg_partition_funcs(struct cfg *cfg, struct bpf_insn *cur,
                                struct bpf_insn *end)
{
        struct func_node *func, *last_func;

        func = cfg_append_func(cfg, cur);
        if (!func)
                return true;

        for (; cur < end; cur++) {
                if (cur->code != (BPF_JMP | BPF_CALL))
                        continue;
                if (cur->src_reg != BPF_PSEUDO_CALL)
                        continue;
                func = cfg_append_func(cfg, cur + cur->off + 1);
                if (!func)
                        return true;
        }

        last_func = cfg_last_func(cfg);
        last_func->end = end - 1;
        func = cfg_first_func(cfg);
        list_for_each_entry_from(func, &last_func->l, l) {
                func->end = func_next(func)->start - 1;
        }

        return false;
}

static bool is_jmp_insn(__u8 code)
{
        return BPF_CLASS(code) == BPF_JMP || BPF_CLASS(code) == BPF_JMP32;
}
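
/*
 * Mark basic block heads: the function entry, the target of every jump,
 * and, for conditional jumps, the fall-through instruction that follows.
 * BPF_EXIT and BPF_CALL do not start new blocks here; they are handled
 * when edges are added.  Returns true on allocation failure.
 */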

static bool func_partition_bb_head(struct func_node *func)
{
        struct bpf_insn *cur, *end;
        struct bb_node *bb;

        cur = func->start;
        end = func->end;
        INIT_LIST_HEAD(&func->bbs);
        bb = func_append_bb(func, cur);
        if (!bb)
                return true;

        for (; cur <= end; cur++) {
                if (is_jmp_insn(cur->code)) {
                        __u8 opcode = BPF_OP(cur->code);

                        if (opcode == BPF_EXIT || opcode == BPF_CALL)
                                continue;

                        bb = func_append_bb(func, cur + cur->off + 1);
                        if (!bb)
                                return true;

                        if (opcode != BPF_JA) {
                                bb = func_append_bb(func, cur + 1);
                                if (!bb)
                                        return true;
                        }
                }
        }

        return false;
}
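
/*
 * With all heads known, set each block's tail to the instruction before
 * the next block's head, and assign block indices starting after the
 * two reserved ENTRY/EXIT indices.
 */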

static void func_partition_bb_tail(struct func_node *func)
{
        unsigned int bb_idx = NUM_FIXED_BLOCKS;
        struct bb_node *bb, *last;

        last = func_last_bb(func);
        last->tail = func->end;
        bb = func_first_bb(func);
        list_for_each_entry_from(bb, &last->l, l) {
                bb->tail = bb_next(bb)->head - 1;
                bb->idx = bb_idx++;
        }

        last->idx = bb_idx++;
        func->bb_num = bb_idx;
}
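
/*
 * Insert the synthetic ENTRY block before the first real block and the
 * synthetic EXIT block after the last one.  Returns true on allocation
 * failure.
 */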

static bool func_add_special_bb(struct func_node *func)
{
        struct bb_node *bb;

        bb = func_insert_dummy_bb(&func->bbs);
        if (!bb)
                return true;
        bb->idx = ENTRY_BLOCK_INDEX;

        bb = func_insert_dummy_bb(&func_last_bb(func)->l);
        if (!bb)
                return true;
        bb->idx = EXIT_BLOCK_INDEX;

        return false;
}

static bool func_partition_bb(struct func_node *func)
{
        if (func_partition_bb_head(func))
                return true;

        func_partition_bb_tail(func);

        return false;
}

static struct bb_node *func_search_bb_with_head(struct func_node *func,
                                                struct bpf_insn *insn)
{
        struct bb_node *bb;

        list_for_each_entry(bb, &func->bbs, l) {
                if (bb->head == insn)
                        return bb;
        }

        return NULL;
}

static struct edge_node *new_edge(struct bb_node *src, struct bb_node *dst,
                                  int flags)
{
        struct edge_node *e;

        e = calloc(1, sizeof(*e));
        if (!e) {
                p_err("OOM when allocating edge node");
                return NULL;
        }

        if (src)
                e->src = src;
        if (dst)
                e->dst = dst;

        e->flags |= flags;

        return e;
}
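
/*
 * Connect the blocks.  ENTRY falls through to the first real block and
 * the last real block falls through to EXIT.  For every other block the
 * edges depend on its final instruction: non-jumps, calls and exits get
 * a single fall-through edge; an unconditional BPF_JA gets a single
 * jump edge to its target; a conditional jump gets both a fall-through
 * edge and a jump edge.  Returns true on allocation failure.
 */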

static bool func_add_bb_edges(struct func_node *func)
{
        struct bpf_insn *insn;
        struct edge_node *e;
        struct bb_node *bb;

        bb = entry_bb(func);
        e = new_edge(bb, bb_next(bb), EDGE_FLAG_FALLTHROUGH);
        if (!e)
                return true;
        list_add_tail(&e->l, &bb->e_succs);

        bb = exit_bb(func);
        e = new_edge(bb_prev(bb), bb, EDGE_FLAG_FALLTHROUGH);
        if (!e)
                return true;
        list_add_tail(&e->l, &bb->e_prevs);

        bb = entry_bb(func);
        bb = bb_next(bb);
        list_for_each_entry_from(bb, &exit_bb(func)->l, l) {
                e = new_edge(bb, NULL, EDGE_FLAG_EMPTY);
                if (!e)
                        return true;
                e->src = bb;

                insn = bb->tail;
                if (!is_jmp_insn(insn->code) ||
                    BPF_OP(insn->code) == BPF_CALL ||
                    BPF_OP(insn->code) == BPF_EXIT) {
                        e->dst = bb_next(bb);
                        e->flags |= EDGE_FLAG_FALLTHROUGH;
                        list_add_tail(&e->l, &bb->e_succs);
                        continue;
                } else if (BPF_OP(insn->code) == BPF_JA) {
                        e->dst = func_search_bb_with_head(func,
                                                          insn + insn->off + 1);
                        e->flags |= EDGE_FLAG_JUMP;
                        list_add_tail(&e->l, &bb->e_succs);
                        continue;
                }

                e->dst = bb_next(bb);
                e->flags |= EDGE_FLAG_FALLTHROUGH;
                list_add_tail(&e->l, &bb->e_succs);

                e = new_edge(bb, NULL, EDGE_FLAG_JUMP);
                if (!e)
                        return true;
                e->src = bb;
                e->dst = func_search_bb_with_head(func, insn + insn->off + 1);
                list_add_tail(&e->l, &bb->e_succs);
        }

        return false;
}
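
/*
 * Build the whole CFG: partition @insn[0..len) (@len is in bytes) into
 * functions, partition each function into basic blocks, add the
 * synthetic ENTRY/EXIT blocks and wire up the edges.  Returns true on
 * failure.
 */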

static bool cfg_build(struct cfg *cfg, struct bpf_insn *insn, unsigned int len)
{
        int cnt = len / sizeof(*insn);
        struct func_node *func;

        INIT_LIST_HEAD(&cfg->funcs);

        if (cfg_partition_funcs(cfg, insn, insn + cnt))
                return true;

        list_for_each_entry(func, &cfg->funcs, l) {
                if (func_partition_bb(func) || func_add_special_bb(func))
                        return true;

                if (func_add_bb_edges(func))
                        return true;
        }

        return false;
}
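
/* Free every edge, block and function node hanging off @cfg. */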

static void cfg_destroy(struct cfg *cfg)
{
        struct func_node *func, *func2;

        list_for_each_entry_safe(func, func2, &cfg->funcs, l) {
                struct bb_node *bb, *bb2;

                list_for_each_entry_safe(bb, bb2, &func->bbs, l) {
                        struct edge_node *e, *e2;

                        list_for_each_entry_safe(e, e2, &bb->e_prevs, l) {
                                list_del(&e->l);
                                free(e);
                        }

                        list_for_each_entry_safe(e, e2, &bb->e_succs, l) {
                                list_del(&e->l);
                                free(e);
                        }

                        list_del(&bb->l);
                        free(bb);
                }

                list_del(&func->l);
                free(func);
        }
}
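
/*
 * Emit one DOT node per basic block.  ENTRY and EXIT are drawn as
 * "Mdiamond" shapes; real blocks are "record" nodes whose label is the
 * block's disassembly, produced by dump_xlated_for_graph().
 */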

static void
draw_bb_node(struct func_node *func, struct bb_node *bb, struct dump_data *dd,
             bool opcodes, bool linum)
{
        const char *shape;

        if (bb->idx == ENTRY_BLOCK_INDEX || bb->idx == EXIT_BLOCK_INDEX)
                shape = "Mdiamond";
        else
                shape = "record";

        printf("\tfn_%d_bb_%d [shape=%s,style=filled,label=\"",
               func->idx, bb->idx, shape);

        if (bb->idx == ENTRY_BLOCK_INDEX) {
                printf("ENTRY");
        } else if (bb->idx == EXIT_BLOCK_INDEX) {
                printf("EXIT");
        } else {
                unsigned int start_idx;
                printf("{\\\n");
                start_idx = bb->head - func->start;
                dump_xlated_for_graph(dd, bb->head, bb->tail, start_idx,
                                      opcodes, linum);
                printf("}");
        }

        printf("\"];\n\n");
}

static void draw_bb_succ_edges(struct func_node *func, struct bb_node *bb)
{
        const char *style = "\"solid,bold\"";
        const char *color = "black";
        int func_idx = func->idx;
        struct edge_node *e;
        int weight = 10;

        if (list_empty(&bb->e_succs))
                return;

        list_for_each_entry(e, &bb->e_succs, l) {
                printf("\tfn_%d_bb_%d:s -> fn_%d_bb_%d:n [style=%s, color=%s, weight=%d, constraint=true",
                       func_idx, e->src->idx, func_idx, e->dst->idx,
                       style, color, weight);
                printf("];\n");
        }
}

static void
func_output_bb_def(struct func_node *func, struct dump_data *dd,
                   bool opcodes, bool linum)
{
        struct bb_node *bb;

        list_for_each_entry(bb, &func->bbs, l) {
                draw_bb_node(func, bb, dd, opcodes, linum);
        }
}

static void func_output_edges(struct func_node *func)
{
        int func_idx = func->idx;
        struct bb_node *bb;

        list_for_each_entry(bb, &func->bbs, l) {
                draw_bb_succ_edges(func, bb);
        }

        /* Add an invisible edge from ENTRY to EXIT to improve the
         * graph layout.
         */
        printf("\tfn_%d_bb_%d:s -> fn_%d_bb_%d:n [style=\"invis\", constraint=true];\n",
               func_idx, ENTRY_BLOCK_INDEX, func_idx, EXIT_BLOCK_INDEX);
}

static void
cfg_dump(struct cfg *cfg, struct dump_data *dd, bool opcodes, bool linum)
{
        struct func_node *func;

        printf("digraph \"DOT graph for eBPF program\" {\n");
        list_for_each_entry(func, &cfg->funcs, l) {
                printf("subgraph \"cluster_%d\" {\n\tstyle=\"dashed\";\n\tcolor=\"black\";\n\tlabel=\"func_%d ()\";\n",
                       func->idx, func->idx);
                func_output_bb_def(func, dd, opcodes, linum);
                func_output_edges(func);
                printf("}\n");
        }
        printf("}\n");
}
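
/*
 * Entry point: build the CFG for the xlated instructions in @buf (@len
 * bytes), print it as DOT on stdout and free it again.  In bpftool this
 * is reached via the "visual" mode of "bpftool prog dump xlated"; the
 * output can then be rendered with Graphviz, e.g. (program ID is
 * hypothetical):
 *
 *   bpftool prog dump xlated id 1 visual > cfg.dot
 *   dot -Tpng cfg.dot -o cfg.png
 */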

void dump_xlated_cfg(struct dump_data *dd, void *buf, unsigned int len,
                     bool opcodes, bool linum)
{
        struct bpf_insn *insn = buf;
        struct cfg cfg;

        memset(&cfg, 0, sizeof(cfg));
        if (cfg_build(&cfg, insn, len))
                return;

        cfg_dump(&cfg, dd, opcodes, linum);

        cfg_destroy(&cfg);
}