/* net/core/net_test.c — KUnit tests for the networking core:
 * GSO segmentation (skb_segment) and IP tunnel flag conversion helpers.
 */
1
// SPDX-License-Identifier: GPL-2.0-or-later
2
3
#include <kunit/test.h>
4
5
/* GSO */
6
7
#include <linux/skbuff.h>
8
9
/* Dummy 8-byte "protocol header" copied into each test skb's mac header;
 * also used to verify the header was replicated onto every segment.
 */
static const char hdr[] = "abcdefgh";
/* gso_size shared by all cases: a full segment carries 1000 payload bytes */
#define GSO_TEST_SIZE 1000
11
12
/* Prepare a freshly built skb for skb_segment(): write the dummy header
 * at the mac header, advance skb->data past it to the payload, reset the
 * network/transport header offsets and set a default gso_size.
 */
static void __init_skb(struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	memcpy(skb_mac_header(skb), hdr, sizeof(hdr));

	/* skb_segment expects skb->data at start of payload */
	skb_pull(skb, sizeof(hdr));
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	/* proto is arbitrary, as long as not ETH_P_TEB or vlan */
	skb->protocol = htons(ETH_P_ATALK);
	skb_shinfo(skb)->gso_size = GSO_TEST_SIZE;
}
26
27
/* Identifiers for the GSO test cases in cases[] below. gso_test_func()
 * keys special handling (feature flags, GSO_BY_FRAGS gso_size) off this id.
 */
enum gso_test_nr {
	GSO_TEST_LINEAR,
	GSO_TEST_NO_GSO,
	GSO_TEST_FRAGS,
	GSO_TEST_FRAGS_PURE,
	GSO_TEST_GSO_PARTIAL,
	GSO_TEST_FRAG_LIST,
	GSO_TEST_FRAG_LIST_PURE,
	GSO_TEST_FRAG_LIST_NON_UNIFORM,
	GSO_TEST_GSO_BY_FRAGS,
};
38
39
/* One parameterized GSO test case: how to build the input skb and the
 * per-segment lengths expected back from skb_segment().
 */
struct gso_test_case {
	enum gso_test_nr id;	/* selects special handling in gso_test_func() */
	const char *name;	/* KUnit parameter description */

	/* input */
	unsigned int linear_len;	/* payload bytes in the linear area */
	unsigned int nr_frags;		/* page frags attached to shinfo */
	const unsigned int *frags;	/* per-frag sizes, nr_frags entries */
	unsigned int nr_frag_skbs;	/* skbs chained on the frag_list */
	const unsigned int *frag_skbs;	/* per-frag-skb sizes */

	/* output as expected */
	unsigned int nr_segs;		/* number of segments returned */
	const unsigned int *segs;	/* payload length of each segment */
};
54
55
static struct gso_test_case cases[] = {
	{
		/* exactly one gso_size of linear data: nothing to split */
		.id = GSO_TEST_NO_GSO,
		.name = "no_gso",
		.linear_len = GSO_TEST_SIZE,
		.nr_segs = 1,
		.segs = (const unsigned int[]) { GSO_TEST_SIZE },
	},
	{
		/* linear-only payload split into two full segs plus 1 byte */
		.id = GSO_TEST_LINEAR,
		.name = "linear",
		.linear_len = GSO_TEST_SIZE + GSO_TEST_SIZE + 1,
		.nr_segs = 3,
		.segs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE, 1 },
	},
	{
		/* linear data plus page frags */
		.id = GSO_TEST_FRAGS,
		.name = "frags",
		.linear_len = GSO_TEST_SIZE,
		.nr_frags = 2,
		.frags = (const unsigned int[]) { GSO_TEST_SIZE, 1 },
		.nr_segs = 3,
		.segs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE, 1 },
	},
	{
		/* all payload in page frags, no linear data */
		.id = GSO_TEST_FRAGS_PURE,
		.name = "frags_pure",
		.nr_frags = 3,
		.frags = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE, 2 },
		.nr_segs = 3,
		.segs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE, 2 },
	},
	{
		/* NETIF_F_GSO_PARTIAL: full segs coalesce into one larger seg */
		.id = GSO_TEST_GSO_PARTIAL,
		.name = "gso_partial",
		.linear_len = GSO_TEST_SIZE,
		.nr_frags = 2,
		.frags = (const unsigned int[]) { GSO_TEST_SIZE, 3 },
		.nr_segs = 2,
		.segs = (const unsigned int[]) { 2 * GSO_TEST_SIZE, 3 },
	},
	{
		/* commit 89319d3801d1: frag_list on mss boundaries */
		.id = GSO_TEST_FRAG_LIST,
		.name = "frag_list",
		.linear_len = GSO_TEST_SIZE,
		.nr_frag_skbs = 2,
		.frag_skbs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE },
		.nr_segs = 3,
		.segs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE, GSO_TEST_SIZE },
	},
	{
		/* frag_list only, no linear data */
		.id = GSO_TEST_FRAG_LIST_PURE,
		.name = "frag_list_pure",
		.nr_frag_skbs = 2,
		.frag_skbs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE },
		.nr_segs = 2,
		.segs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE },
	},
	{
		/* commit 43170c4e0ba7: GRO of frag_list trains */
		.id = GSO_TEST_FRAG_LIST_NON_UNIFORM,
		.name = "frag_list_non_uniform",
		.linear_len = GSO_TEST_SIZE,
		.nr_frag_skbs = 4,
		.frag_skbs = (const unsigned int[]) { GSO_TEST_SIZE, 1, GSO_TEST_SIZE, 2 },
		.nr_segs = 4,
		.segs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE, GSO_TEST_SIZE, 3 },
	},
	{
		/* commit 3953c46c3ac7 ("sk_buff: allow segmenting based on frag sizes") and
		 * commit 90017accff61 ("sctp: Add GSO support")
		 *
		 * "there will be a cover skb with protocol headers and
		 * children ones containing the actual segments"
		 */
		.id = GSO_TEST_GSO_BY_FRAGS,
		.name = "gso_by_frags",
		.nr_frag_skbs = 4,
		.frag_skbs = (const unsigned int[]) { 100, 200, 300, 400 },
		.nr_segs = 4,
		.segs = (const unsigned int[]) { 100, 200, 300, 400 },
	},
};
139
140
static void gso_test_case_to_desc(struct gso_test_case *t, char *desc)
141
{
142
sprintf(desc, "%s", t->name);
143
}
144
145
/* Generates gso_test_gen_params() used by KUNIT_CASE_PARAM() in net_test_cases */
KUNIT_ARRAY_PARAM(gso_test, cases, gso_test_case_to_desc);
146
147
/* Build an skb per the current test case (linear area, optional page frags,
 * optional frag_list of child skbs), run skb_segment() on it, and verify the
 * resulting segment chain: lengths, header placement, header contents, and
 * the segs->prev shortcut to the last segment.
 */
static void gso_test_func(struct kunit *test)
{
	const int shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct sk_buff *skb, *segs, *cur, *next, *last;
	const struct gso_test_case *tcase;
	netdev_features_t features;
	struct page *page;
	int i;

	tcase = test->param_value;

	/* linear area: header plus linear_len payload, shinfo at the tail */
	page = alloc_page(GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, page);
	skb = build_skb(page_address(page), sizeof(hdr) + tcase->linear_len + shinfo_size);
	KUNIT_ASSERT_NOT_NULL(test, skb);
	__skb_put(skb, sizeof(hdr) + tcase->linear_len);

	__init_skb(skb);

	if (tcase->nr_frags) {
		unsigned int pg_off = 0;

		/* all frags share one page at increasing offsets; take one
		 * extra page reference per additional frag
		 */
		page = alloc_page(GFP_KERNEL);
		KUNIT_ASSERT_NOT_NULL(test, page);
		page_ref_add(page, tcase->nr_frags - 1);

		for (i = 0; i < tcase->nr_frags; i++) {
			skb_fill_page_desc(skb, i, page, pg_off, tcase->frags[i]);
			pg_off += tcase->frags[i];
		}

		/* case definitions must fit within the single shared page */
		KUNIT_ASSERT_LE(test, pg_off, PAGE_SIZE);

		skb->data_len = pg_off;
		skb->len += skb->data_len;
		skb->truesize += skb->data_len;
	}

	if (tcase->frag_skbs) {
		unsigned int total_size = 0, total_true_size = 0;
		struct sk_buff *frag_skb, *prev = NULL;

		/* chain one payload-only child skb per frag_skbs[] entry
		 * onto shinfo(skb)->frag_list
		 */
		for (i = 0; i < tcase->nr_frag_skbs; i++) {
			unsigned int frag_size;

			page = alloc_page(GFP_KERNEL);
			KUNIT_ASSERT_NOT_NULL(test, page);

			frag_size = tcase->frag_skbs[i];
			frag_skb = build_skb(page_address(page),
					     frag_size + shinfo_size);
			KUNIT_ASSERT_NOT_NULL(test, frag_skb);
			__skb_put(frag_skb, frag_size);

			if (prev)
				prev->next = frag_skb;
			else
				skb_shinfo(skb)->frag_list = frag_skb;
			prev = frag_skb;

			total_size += frag_size;
			total_true_size += frag_skb->truesize;
		}

		/* account the frag_list bytes on the head skb */
		skb->len += total_size;
		skb->data_len += total_size;
		skb->truesize += total_true_size;

		if (tcase->id == GSO_TEST_GSO_BY_FRAGS)
			skb_shinfo(skb)->gso_size = GSO_BY_FRAGS;
	}

	features = NETIF_F_SG | NETIF_F_HW_CSUM;
	if (tcase->id == GSO_TEST_GSO_PARTIAL)
		features |= NETIF_F_GSO_PARTIAL;

	/* TODO: this should also work with SG,
	 * rather than hit BUG_ON(i >= nfrags)
	 */
	if (tcase->id == GSO_TEST_FRAG_LIST_NON_UNIFORM)
		features &= ~NETIF_F_SG;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs)) {
		KUNIT_FAIL(test, "segs error %pe", segs);
		goto free_gso_skb;
	} else if (!segs) {
		KUNIT_FAIL(test, "no segments");
		goto free_gso_skb;
	}

	last = segs->prev;
	for (cur = segs, i = 0; cur; cur = next, i++) {
		/* save the link before consuming the current segment */
		next = cur->next;

		KUNIT_ASSERT_EQ(test, cur->len, sizeof(hdr) + tcase->segs[i]);

		/* segs have skb->data pointing to the mac header */
		KUNIT_ASSERT_PTR_EQ(test, skb_mac_header(cur), cur->data);
		KUNIT_ASSERT_PTR_EQ(test, skb_network_header(cur), cur->data + sizeof(hdr));

		/* header was copied to all segs */
		KUNIT_ASSERT_EQ(test, memcmp(skb_mac_header(cur), hdr, sizeof(hdr)), 0);

		/* last seg can be found through segs->prev pointer */
		if (!next)
			KUNIT_ASSERT_PTR_EQ(test, cur, last);

		consume_skb(cur);
	}

	KUNIT_ASSERT_EQ(test, i, tcase->nr_segs);

free_gso_skb:
	consume_skb(skb);
}
263
264
/* IP tunnel flags */
265
266
#include <net/ip_tunnels.h>
267
268
/* One IP tunnel flag conversion case: bits to set in the bitmap-based
 * flags, plus the expected __be16 representation and round-trip result.
 */
struct ip_tunnel_flags_test {
	const char *name;	/* KUnit parameter description */

	const u16 *src_bits;	/* bit numbers to set in the source bitmap */
	const u16 *exp_bits;	/* bit numbers expected after be16 round-trip */
	u8 src_num;		/* entries in src_bits */
	u8 exp_num;		/* entries in exp_bits */

	__be16 exp_val;		/* expected ip_tunnel_flags_to_be16() value */
	bool exp_comp;		/* expected ip_tunnel_flags_is_be16_compat() */
};
279
280
/* Build one ip_tunnel_flags_test entry; src/exp must be real arrays so
 * ARRAY_SIZE() can derive src_num/exp_num at compile time.
 */
#define IP_TUNNEL_FLAGS_TEST(n, src, comp, eval, exp) {	\
	.name		= (n),				\
	.src_bits	= (src),			\
	.src_num	= ARRAY_SIZE(src),		\
	.exp_comp	= (comp),			\
	.exp_val	= (eval),			\
	.exp_bits	= (exp),			\
	.exp_num	= ARRAY_SIZE(exp),		\
}
289
290
/* These are __be16-compatible and can be compared as is */
static const u16 ip_tunnel_flags_1[] = {
	IP_TUNNEL_KEY_BIT,
	IP_TUNNEL_STRICT_BIT,
	IP_TUNNEL_ERSPAN_OPT_BIT,
};
296
297
/* Due to the previous flags design limitation, setting either
 * ``IP_TUNNEL_CSUM_BIT`` (on Big Endian) or ``IP_TUNNEL_DONT_FRAGMENT_BIT``
 * (on Little) also sets VTI/ISATAP bit. In the bitmap implementation, they
 * correspond to ``BIT(16)``, which is bigger than ``U16_MAX``, but still is
 * backward-compatible.
 */
#ifdef __LITTLE_ENDIAN
#define IP_TUNNEL_CONFLICT_BIT	IP_TUNNEL_DONT_FRAGMENT_BIT
#else
#define IP_TUNNEL_CONFLICT_BIT	IP_TUNNEL_CSUM_BIT
#endif
308
309
/* Source: just the endian-dependent conflicting bit ... */
static const u16 ip_tunnel_flags_2_src[] = {
	IP_TUNNEL_CONFLICT_BIT,
};

/* ... expected: the conflict bit plus the implied VTI/ISATAP bit */
static const u16 ip_tunnel_flags_2_exp[] = {
	IP_TUNNEL_CONFLICT_BIT,
	IP_TUNNEL_SIT_ISATAP_BIT,
};
317
318
/* Bits 17 and higher are not compatible with __be16 flags */
static const u16 ip_tunnel_flags_3_src[] = {
	IP_TUNNEL_VXLAN_OPT_BIT,
	17,
	18,
	20,
};

/* only the low, be16-representable bit survives the round-trip */
static const u16 ip_tunnel_flags_3_exp[] = {
	IP_TUNNEL_VXLAN_OPT_BIT,
};
329
330
/* The three parameterized conversion cases: fully compatible bits, the
 * endian-dependent VTI/ISATAP conflict, and bits beyond the be16 range.
 */
static const struct ip_tunnel_flags_test ip_tunnel_flags_test[] = {
	IP_TUNNEL_FLAGS_TEST("compat", ip_tunnel_flags_1, true,
			     cpu_to_be16(BIT(IP_TUNNEL_KEY_BIT) |
					 BIT(IP_TUNNEL_STRICT_BIT) |
					 BIT(IP_TUNNEL_ERSPAN_OPT_BIT)),
			     ip_tunnel_flags_1),
	IP_TUNNEL_FLAGS_TEST("conflict", ip_tunnel_flags_2_src, true,
			     VTI_ISVTI, ip_tunnel_flags_2_exp),
	IP_TUNNEL_FLAGS_TEST("new", ip_tunnel_flags_3_src, false,
			     cpu_to_be16(BIT(IP_TUNNEL_VXLAN_OPT_BIT)),
			     ip_tunnel_flags_3_exp),
};
342
343
/* Format the KUnit parameter description (bounded copy of the case name). */
static void
ip_tunnel_flags_test_case_to_desc(const struct ip_tunnel_flags_test *t,
				  char *desc)
{
	strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
}
349
/* Generates ip_tunnel_flags_test_gen_params() for KUNIT_CASE_PARAM() */
KUNIT_ARRAY_PARAM(ip_tunnel_flags_test, ip_tunnel_flags_test,
		  ip_tunnel_flags_test_case_to_desc);
351
352
static void ip_tunnel_flags_test_run(struct kunit *test)
353
{
354
const struct ip_tunnel_flags_test *t = test->param_value;
355
IP_TUNNEL_DECLARE_FLAGS(src) = { };
356
IP_TUNNEL_DECLARE_FLAGS(exp) = { };
357
IP_TUNNEL_DECLARE_FLAGS(out);
358
359
for (u32 j = 0; j < t->src_num; j++)
360
__set_bit(t->src_bits[j], src);
361
for (u32 j = 0; j < t->exp_num; j++)
362
__set_bit(t->exp_bits[j], exp);
363
364
KUNIT_ASSERT_EQ(test, t->exp_comp,
365
ip_tunnel_flags_is_be16_compat(src));
366
KUNIT_ASSERT_EQ(test, (__force u16)t->exp_val,
367
(__force u16)ip_tunnel_flags_to_be16(src));
368
369
ip_tunnel_flags_from_be16(out, t->exp_val);
370
KUNIT_ASSERT_TRUE(test, __ipt_flag_op(bitmap_equal, exp, out));
371
}
372
373
/* All cases in this suite; both are parameterized over their case tables. */
static struct kunit_case net_test_cases[] = {
	KUNIT_CASE_PARAM(gso_test_func, gso_test_gen_params),
	KUNIT_CASE_PARAM(ip_tunnel_flags_test_run,
			 ip_tunnel_flags_test_gen_params),
	{ },
};
379
380
/* Suite registration; runs as "net_core" under the KUnit framework. */
static struct kunit_suite net_test_suite = {
	.name			= "net_core",
	.test_cases		= net_test_cases,
};
kunit_test_suite(net_test_suite);
385
386
MODULE_DESCRIPTION("KUnit tests for networking core");
387
MODULE_LICENSE("GPL");
388
389